--- /dev/null
+What: /sys/devices/platform/docg3/f[0-3]_dps[01]_is_keylocked
+Date: November 2011
+KernelVersion: 3.3
+Contact: Robert Jarzmik <robert.jarzmik@free.fr>
+Description:
+ Show whether the floor (0 to 3), protection area (0 or 1) is
+ keylocked. Each docg3 chip (or floor) has 2 protection areas,
+ called DPS, which can cover any block-aligned part of it.
+ Each DPS embeds information on whether it blocks reads,
+ writes or both.
+ The result is:
+ 0 -> the DPS is not keylocked
+ 1 -> the DPS is keylocked
+Users: None identified so far.
+
+What: /sys/devices/platform/docg3/f[0-3]_dps[01]_protection_key
+Date: November 2011
+KernelVersion: 3.3
+Contact: Robert Jarzmik <robert.jarzmik@free.fr>
+Description:
+ Enter the protection key for the floor (0 to 3), protection
+ area (0 or 1). Each docg3 chip (or floor) has 2 protection
+ areas, called DPS, which can cover any block-aligned part of it.
+ Each DPS embeds information on whether it blocks reads,
+ writes or both.
+ The protection key is a string of 8 bytes (value 0-255).
+ Entering the correct value toggles the lock; the effect can be
+ observed through f[0-3]_dps[01]_is_keylocked.
+ Possible values are:
+ - 8 bytes
+ Typical values are:
+ - "00000000"
+ - "12345678"
+Users: None identified so far.
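
For illustration only, a minimal userspace sketch exercising the two attributes above might look as follows; the sysfs path assumes a platform device named "docg3" as documented, and the "12345678" key is purely a placeholder:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char state;
	int fd;

	/* Enter the 8-byte protection key of floor 0, DPS 0 */
	fd = open("/sys/devices/platform/docg3/f0_dps0_protection_key", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "12345678", 8) != 8)
		perror("write key");
	close(fd);

	/* Read back the lock state; it toggles only if the key was correct */
	fd = open("/sys/devices/platform/docg3/f0_dps0_is_keylocked", O_RDONLY);
	if (fd < 0)
		return 1;
	if (read(fd, &state, 1) == 1)
		printf("f0_dps0 keylocked: %c\n", state); /* '0' or '1' */
	close(fd);
	return 0;
}
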
--- /dev/null
+GPIO assisted NAND flash
+
+The GPIO assisted NAND flash uses a memory mapped interface to
+read/write the NAND commands and data and GPIO pins for the control
+signals.
+
+Required properties:
+- compatible : "gpio-control-nand"
+- reg : should specify localbus chip select and size used for the chip. The
+ resource describes the data bus connected to the NAND flash and all accesses
+ are made in native endianness.
+- #address-cells, #size-cells : Must be present if the device has sub-nodes
+ representing partitions.
+- gpios : specifies the gpio pins to control the NAND device, in the order
+ rdy, nce, ale, cle, nwp (see the example below). nwp is an optional gpio
+ and may be set to 0 if not present.
+
+Optional properties:
+- bank-width : Width (in bytes) of the device. If not present, the width
+ defaults to 1 byte.
+- chip-delay : chip dependent delay for transferring data from array to
+ read registers (tR). If not present then a default of 20us is used.
+- gpio-control-nand,io-sync-reg : A 64-bit physical address for a read
+ location used to guard against bus reordering with regard to accesses to
+ the GPIOs and the NAND flash data bus. If present, then after changing
+ GPIO state and before and after command byte writes, this register will be
+ read to ensure that the GPIO accesses have completed.
+
+Examples:
+
+gpio-nand@1,0 {
+ compatible = "gpio-control-nand";
+ reg = <1 0x0000 0x2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ gpios = <&banka 1 0 /* rdy */
+ &banka 2 0 /* nce */
+ &banka 3 0 /* ale */
+ &banka 4 0 /* cle */
+ 0 /* nwp */>;
+
+ partition@0 {
+ ...
+ };
+};
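
As a sketch of the optional gpio-control-nand,io-sync-reg property in the same example style (the &banka phandle is reused from the node above; the 0x10000000 read location is purely illustrative and not taken from any real board):

gpio-nand@1,0 {
	compatible = "gpio-control-nand";
	reg = <1 0x0000 0x2>;
	#address-cells = <1>;
	#size-cells = <1>;
	gpio-control-nand,io-sync-reg = <0x0 0x10000000>;
	gpios = <&banka 1 0	/* rdy */
		 &banka 2 0	/* nce */
		 &banka 3 0	/* ale */
		 &banka 4 0	/* cle */
		 0		/* nwp */>;

	partition@0 {
		...
	};
};
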
size_t retlen;
if (!strcmp(mtd->name, "MAC-Address")) {
- mtd->read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
+ mtd_read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
if (retlen == ETH_ALEN)
pr_info("Read MAC addr from SPI Flash: %pM\n",
mac_addr);
*/
int blockstat;
do {
- blockstat = main_mtd->block_isbad(main_mtd,
- ptable_sector);
+ blockstat = mtd_block_isbad(main_mtd, ptable_sector);
if (blockstat < 0)
ptable_sector = 0; /* read error */
else if (blockstat)
} while (blockstat && ptable_sector);
#endif
if (ptable_sector) {
- main_mtd->read(main_mtd, ptable_sector, PAGESIZE,
- &len, page);
+ mtd_read(main_mtd, ptable_sector, PAGESIZE, &len,
+ page);
ptable_head = &((struct partitiontable *) page)->head;
}
}
};
+static const char *bcm63xx_part_types[] = { "bcm63xxpart", NULL };
+
static struct physmap_flash_data flash_data = {
.width = 2,
.nr_parts = ARRAY_SIZE(mtd_partitions),
.parts = mtd_partitions,
+ .part_probe_types = bcm63xx_part_types,
};
static struct resource mtd_resources[] = {
#define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */
#define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */
#define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */
-#define CRC_LEN 4 /* Length of CRC in bytes */
#define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */
#define NUM_PIRELLI 2
/* 192-195: Version flash layout */
char flash_layout_ver[FLASHLAYOUTVER_LEN];
/* 196-199: kernel+rootfs CRC32 */
- char fskernel_crc[CRC_LEN];
+ __u32 fskernel_crc;
/* 200-215: Unused except on Alice Gate where is is information */
char information2[TAGINFO2_LEN];
/* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
- char image_crc[CRC_LEN];
+ __u32 image_crc;
/* 220-223: CRC32 of rootfs partition */
- char rootfs_crc[CRC_LEN];
+ __u32 rootfs_crc;
/* 224-227: CRC32 of kernel partition */
- char kernel_crc[CRC_LEN];
+ __u32 kernel_crc;
/* 228-235: Unused at present */
char reserved1[8];
/* 236-239: CRC32 of header excluding last 20 bytes */
- char header_crc[CRC_LEN];
+ __u32 header_crc;
/* 240-255: Unused at present */
char reserved2[16];
};
---help---
TI AR7 partitioning support
+config MTD_BCM63XX_PARTS
+ tristate "BCM63XX CFE partitioning support"
+ depends on BCM63XX
+ select CRC32
+ help
+ This provides partition parsing for BCM63xx devices with CFE
+ bootloaders.
+
comment "User Modules And Translation Layers"
config MTD_CHAR
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
+obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_CHAR) += mtdchar.o
size_t sz;
int ret;
- ret = mtd->read(mtd, ptr, sizeof(fs), &sz, (u_char *) &fs);
+ ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs);
if (ret >= 0 && sz != sizeof(fs))
ret = -EINVAL;
int ret, i;
memset(iis, 0, sizeof(*iis));
- ret = mtd->read(mtd, ptr, sizeof(*iis), &sz, (u_char *) iis);
+ ret = mtd_read(mtd, ptr, sizeof(*iis), &sz, (u_char *)iis);
if (ret < 0)
goto failed;
do { /* Try 10 blocks starting from master->erasesize */
offset = pre_size;
- master->read(master, offset,
- sizeof(header), &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
if (!strncmp((char *)&header, "TIENV0.8", 8))
ar7_parts[1].offset = pre_size;
if (header.checksum == LOADER_MAGIC1)
case LOADER_MAGIC1:
while (header.length) {
offset += sizeof(header) + header.length;
- master->read(master, offset, sizeof(header),
- &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4;
break;
case LOADER_MAGIC2:
while (header.length) {
offset += sizeof(header) + header.length;
- master->read(master, offset, sizeof(header),
- &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4 + 0xff;
root_offset &= ~(uint32_t)0xff;
break;
}
- master->read(master, root_offset,
- sizeof(header), &len, (u8 *)&header);
+ mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
if (header.checksum != SQUASHFS_MAGIC) {
root_offset += master->erasesize - 1;
root_offset &= ~(master->erasesize - 1);
--- /dev/null
+/*
+ * BCM63XX CFE image tag parser
+ *
+ * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
+ * Mike Albon <malbon@openwrt.org>
+ * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
+ * Copyright © 2011 Jonas Gorski <jonas.gorski@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/mach-bcm63xx/bcm963xx_tag.h>
+#include <asm/mach-bcm63xx/board_bcm963xx.h>
+
+#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
+
+#define BCM63XX_MIN_CFE_SIZE 0x10000 /* always at least 64KiB */
+#define BCM63XX_MIN_NVRAM_SIZE 0x10000 /* always at least 64KiB */
+
+#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
+
+static int bcm63xx_detect_cfe(struct mtd_info *master)
+{
+ char buf[9];
+ int ret;
+ size_t retlen;
+
+ ret = mtd_read(master, BCM963XX_CFE_VERSION_OFFSET, 5, &retlen,
+ (void *)buf);
+ buf[retlen] = 0;
+
+ if (ret)
+ return ret;
+
+ if (strncmp("cfe-v", buf, 5) == 0)
+ return 0;
+
+ /* very old CFE's do not have the cfe-v string, so check for magic */
+ ret = mtd_read(master, BCM63XX_CFE_MAGIC_OFFSET, 8, &retlen,
+ (void *)buf);
+ buf[retlen] = 0;
+
+ return strncmp("CFE1CFE1", buf, 8);
+}
+
+static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ /* CFE, NVRAM and global Linux are always present */
+ int nrparts = 3, curpart = 0;
+ struct bcm_tag *buf;
+ struct mtd_partition *parts;
+ int ret;
+ size_t retlen;
+ unsigned int rootfsaddr, kerneladdr, spareaddr;
+ unsigned int rootfslen, kernellen, sparelen, totallen;
+ unsigned int cfelen, nvramlen;
+ int namelen = 0;
+ int i;
+ u32 computed_crc;
+
+ if (bcm63xx_detect_cfe(master))
+ return -EINVAL;
+
+ cfelen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_CFE_SIZE);
+ nvramlen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_NVRAM_SIZE);
+
+ /* Allocate memory for buffer */
+ buf = vmalloc(sizeof(struct bcm_tag));
+ if (!buf)
+ return -ENOMEM;
+
+ /* Get the tag */
+ ret = mtd_read(master, cfelen, sizeof(struct bcm_tag), &retlen,
+ (void *)buf);
+
+ if (retlen != sizeof(struct bcm_tag)) {
+ vfree(buf);
+ return -EIO;
+ }
+
+ computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
+ offsetof(struct bcm_tag, header_crc));
+ if (computed_crc == buf->header_crc) {
+ char *boardid = &(buf->board_id[0]);
+ char *tagversion = &(buf->tag_version[0]);
+
+ sscanf(buf->kernel_address, "%u", &kerneladdr);
+ sscanf(buf->kernel_length, "%u", &kernellen);
+ sscanf(buf->total_length, "%u", &totallen);
+
+ pr_info("CFE boot tag found with version %s and board type %s\n",
+ tagversion, boardid);
+
+ kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
+ rootfsaddr = kerneladdr + kernellen;
+ spareaddr = roundup(totallen, master->erasesize) + cfelen;
+ sparelen = master->size - spareaddr - nvramlen;
+ rootfslen = spareaddr - rootfsaddr;
+ } else {
+ pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
+ buf->header_crc, computed_crc);
+ kernellen = 0;
+ rootfslen = 0;
+ rootfsaddr = 0;
+ spareaddr = cfelen;
+ sparelen = master->size - cfelen - nvramlen;
+ }
+
+ /* Determine number of partitions */
+ namelen = 8;
+ if (rootfslen > 0) {
+ nrparts++;
+ namelen += 6;
+ }
+ if (kernellen > 0) {
+ nrparts++;
+ namelen += 6;
+ }
+
+ /* Ask kernel for more memory */
+ parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
+ if (!parts) {
+ vfree(buf);
+ return -ENOMEM;
+ }
+
+ /* Start building partition list */
+ parts[curpart].name = "CFE";
+ parts[curpart].offset = 0;
+ parts[curpart].size = cfelen;
+ curpart++;
+
+ if (kernellen > 0) {
+ parts[curpart].name = "kernel";
+ parts[curpart].offset = kerneladdr;
+ parts[curpart].size = kernellen;
+ curpart++;
+ }
+
+ if (rootfslen > 0) {
+ parts[curpart].name = "rootfs";
+ parts[curpart].offset = rootfsaddr;
+ parts[curpart].size = rootfslen;
+ if (sparelen > 0)
+ parts[curpart].size += sparelen;
+ curpart++;
+ }
+
+ parts[curpart].name = "nvram";
+ parts[curpart].offset = master->size - nvramlen;
+ parts[curpart].size = nvramlen;
+
+ /* Global partition "linux" to make firmware upgrades easier */
+ curpart++;
+ parts[curpart].name = "linux";
+ parts[curpart].offset = cfelen;
+ parts[curpart].size = master->size - cfelen - nvramlen;
+
+ for (i = 0; i < nrparts; i++)
+ pr_info("Partition %d is %s offset %lx and length %lx\n", i,
+ parts[i].name, (long unsigned int)(parts[i].offset),
+ (long unsigned int)(parts[i].size));
+
+ pr_info("Spare partition is offset %x and length %x\n", spareaddr,
+ sparelen);
+
+ *pparts = parts;
+ vfree(buf);
+
+ return nrparts;
+}
+
+static struct mtd_part_parser bcm63xx_cfe_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = bcm63xx_parse_cfe_partitions,
+ .name = "bcm63xxpart",
+};
+
+static int __init bcm63xx_cfe_parser_init(void)
+{
+ return register_mtd_parser(&bcm63xx_cfe_parser);
+}
+
+static void __exit bcm63xx_cfe_parser_exit(void)
+{
+ deregister_mtd_parser(&bcm63xx_cfe_parser);
+}
+
+module_init(bcm63xx_cfe_parser_init);
+module_exit(bcm63xx_cfe_parser_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
+MODULE_AUTHOR("Jonas Gorski <jonas.gorski@gmail.com");
+MODULE_DESCRIPTION("MTD partitioning for BCM63XX CFE bootloaders");
}
/* Do some byteswapping if necessary */
- extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
- extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
+ extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
+ extp->BlkStatusRegMask = cfi32_to_cpu(map,
+ extp->BlkStatusRegMask);
#ifdef DEBUG_CFI_FEATURES
/* Tell the user about it in lots of lovely detail */
continue;
}
memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
- ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
+ ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
+ buffer);
totlen += thislen;
if (ret || thislen != ECCBUF_SIZE)
goto write_error;
to += ECCBUF_SIZE;
}
if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
- ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
+ ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
+ &thislen, elem_base);
totlen += thislen;
if (ret || thislen != ECCBUF_DIV(elem_len))
goto write_error;
}
if (buflen) { /* flush last page, even if not full */
/* This is sometimes intended behaviour, really */
- ret = mtd->write(mtd, to, buflen, &thislen, buffer);
+ ret = mtd_write(mtd, to, buflen, &thislen, buffer);
totlen += thislen;
if (ret || thislen != ECCBUF_SIZE)
goto write_error;
config MTD_DOC2000
tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
+ depends on MTD_NAND
select MTD_DOCPROBE
select MTD_NAND_IDS
---help---
config MTD_DOC2001
tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
+ depends on MTD_NAND
select MTD_DOCPROBE
select MTD_NAND_IDS
---help---
config MTD_DOC2001PLUS
tristate "M-Systems Disk-On-Chip Millennium Plus"
+ depends on MTD_NAND
select MTD_DOCPROBE
select MTD_NAND_IDS
---help---
config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
+ select BCH
+ select BCH_CONST_PARAMS
---help---
This provides an MTD device driver for the M-Systems DiskOnChip
G3 devices.
M-Systems and now Sandisk. The support is very experimental,
and doesn't give access to any write operations.
+if MTD_DOCG3
+config BCH_CONST_M
+ default 14
+config BCH_CONST_T
+ default 4
+endif
+
config MTD_DOCPROBE
tristate
select MTD_DOCECC
dev->mtd.flags = MTD_CAP_RAM;
dev->mtd.erase = block2mtd_erase;
dev->mtd.write = block2mtd_write;
- dev->mtd.writev = default_mtd_writev;
+ dev->mtd.writev = mtd_writev;
dev->mtd.sync = block2mtd_sync;
dev->mtd.read = block2mtd_read;
dev->mtd.priv = dev;
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
- mtd->erasesize = 0;
mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
mtd->read = doc_read;
mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
-
this->curfloor = -1;
this->curchip = -1;
mutex_init(&this->lock);
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
/* FIXME: erase size is not always 8KiB */
mtd->erasesize = 0x2000;
-
mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
mtd->read = doc_read;
mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
this->curfloor = -1;
this->curchip = -1;
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
-
- mtd->erasesize = 0;
mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
mtd->read = doc_read;
mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
this->curfloor = -1;
this->curchip = -1;
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/bitmap.h>
+#include <linux/bitrev.h>
+#include <linux/bch.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
*
* As no specification is available from M-Systems/Sandisk, this drivers lacks
* several functions available on the chip, as :
- * - block erase
- * - page write
* - IPL write
- * - ECC fixing (lack of BCH algorith understanding)
- * - powerdown / powerup
*
* The bus data width (8bits versus 16bits) is not handled (if_cfg flag), and
* the driver assumes a 16bits data bus.
* DocG3 relies on 2 ECC algorithms, which are handled in hardware :
* - a 1 byte Hamming code stored in the OOB for each page
* - a 7 bytes BCH code stored in the OOB for each page
- * The BCH part is only used for check purpose, no correction is available as
- * some information is missing. What is known is that :
+ * The BCH ECC is :
* - BCH is in GF(2^14)
* - BCH is over data of 520 bytes (512 page + 7 page_info bytes
* + 1 hamming byte)
*
*/
+static unsigned int reliable_mode;
+module_param(reliable_mode, uint, 0);
+MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
+ "2=reliable) : MLC normal operations are in normal mode");
+
+/**
+ * struct docg3_oobinfo - DiskOnChip G3 OOB layout
+ * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC)
+ * @eccpos: ecc positions (byte 7 is Hamming ECC, bytes 8-14 are BCH ECC)
+ * @oobfree: free pageinfo bytes (bytes 0 to 6, and byte 15)
+ * @oobavail: 8 bytes available to the user once the ECC bytes are reserved
+ */
+static struct nand_ecclayout docg3_oobinfo = {
+ .eccbytes = 8,
+ .eccpos = {7, 8, 9, 10, 11, 12, 13, 14},
+ .oobfree = {{0, 7}, {15, 1} },
+ .oobavail = 8,
+};
+
+/**
+ * docg3_bch - BCH decoding control structure, shared by all docg3 chips
+ */
+static struct bch_control *docg3_bch;
+
static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
{
u8 val = readb(docg3->base + reg);
static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
{
writeb(val, docg3->base + reg);
- trace_docg3_io(1, 16, reg, val);
+ trace_docg3_io(1, 8, reg, val);
}
static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
{
int i;
- doc_dbg("NOP x %d\n", nbNOPs);
+ doc_vdbg("NOP x %d\n", nbNOPs);
for (i = 0; i < nbNOPs; i++)
doc_writeb(docg3, 0, DOC_NOP);
}
/**
* doc_read_data_area - Read data from data area
* @docg3: the device
- * @buf: the buffer to fill in
- * @len: the lenght to read
+ * @buf: the buffer to fill in (might be NULL for dummy reads)
+ * @len: the length to read
* @first: first time read, DOC_READADDRESS should be set
*
* Reads bytes from flash data. Handles the single byte / even bytes reads.
dst16 = buf;
for (i = 0; i < len4; i += 2) {
data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
- *dst16 = data16;
- dst16++;
+ if (dst16) {
+ *dst16 = data16;
+ dst16++;
+ }
}
if (cdr) {
dst8 = (u8 *)dst16;
for (i = 0; i < cdr; i++) {
data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
- *dst8 = data8;
- dst8++;
+ if (dst8) {
+ *dst8 = data8;
+ dst8++;
+ }
}
}
}
/**
- * doc_set_data_mode - Sets the flash to reliable data mode
+ * doc_write_data_area - Write data into data area
+ * @docg3: the device
+ * @buf: the buffer to get input bytes from
+ * @len: the length to write
+ *
+ * Writes bytes into flash data. Handles the single byte / even bytes writes.
+ */
+static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len)
+{
+ int i, cdr, len4;
+ u16 *src16;
+ u8 *src8;
+
+ doc_dbg("doc_write_data_area(buf=%p, len=%d)\n", buf, len);
+ cdr = len & 0x3;
+ len4 = len - cdr;
+
+ doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
+ src16 = (u16 *)buf;
+ for (i = 0; i < len4; i += 2) {
+ doc_writew(docg3, *src16, DOC_IOSPACE_DATA);
+ src16++;
+ }
+
+ src8 = (u8 *)src16;
+ for (i = 0; i < cdr; i++) {
+ doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
+ DOC_READADDRESS);
+ doc_writeb(docg3, *src8, DOC_IOSPACE_DATA);
+ src8++;
+ }
+}
+
+/**
+ * doc_set_data_mode - Sets the flash to normal or reliable data mode
* @docg3: the device
*
* The reliable data mode is a bit slower than the fast mode, but less errors
* occur. Entering the reliable mode cannot be done without entering the fast
* mode first.
+ *
+ * In reliable mode, pages 2*n and 2*n+1 are clones. Writing to page 0 of blocks
+ * (4,5) makes the hardware also write to page 1 of blocks (4,5). Reading
+ * from page 0 of blocks (4,5) or from page 1 of blocks (4,5) gives the same
+ * result, which is a logical and between bytes from page 0 and page 1 (which is
+ * consistent with the fact that writing to a page is _clearing_ bits of that
+ * page).
*/
static void doc_set_reliable_mode(struct docg3 *docg3)
{
- doc_dbg("doc_set_reliable_mode()\n");
- doc_flash_sequence(docg3, DOC_SEQ_SET_MODE);
- doc_flash_command(docg3, DOC_CMD_FAST_MODE);
- doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+ static char *strmode[] = { "normal", "fast", "reliable", "invalid" };
+
+ doc_dbg("doc_set_reliable_mode(%s)\n", strmode[docg3->reliable]);
+ switch (docg3->reliable) {
+ case 0:
+ break;
+ case 1:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_FASTMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ break;
+ case 2:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_RELIABLEMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+ break;
+ default:
+ doc_err("doc_set_reliable_mode(): invalid mode\n");
+ break;
+ }
doc_delay(docg3, 2);
}
return 0;
}
+/**
+ * doc_setup_addr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ */
+static void doc_setup_addr_sector(struct docg3 *docg3, int sector)
+{
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_setup_writeaddr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ * @ofs: the offset in the page, between 0 and (512 + 16 + 512)
+ */
+static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)
+{
+ ofs = ofs >> 2;
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, ofs & 0xff);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
/**
* doc_seek - Set both flash planes to the specified block, page for reading
* @docg3: the device
if (ret)
goto out;
- sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_sequence(docg3, DOC_SEQ_READ);
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
- doc_delay(docg3, 1);
- doc_flash_address(docg3, sector & 0xff);
- doc_flash_address(docg3, (sector >> 8) & 0xff);
- doc_flash_address(docg3, (sector >> 16) & 0xff);
- doc_delay(docg3, 1);
+ doc_setup_addr_sector(docg3, sector);
sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
doc_delay(docg3, 1);
- doc_flash_address(docg3, sector & 0xff);
- doc_flash_address(docg3, (sector >> 8) & 0xff);
- doc_flash_address(docg3, (sector >> 16) & 0xff);
+
+out:
+ return ret;
+}
+
+/**
+ * doc_write_seek - Set both flash planes to the specified block, page for writing
+ * @docg3: the device
+ * @block0: the first plane block index
+ * @block1: the second plane block index
+ * @page: the page index within the block
+ * @ofs: offset in page to write
+ *
+ * Programs the flash even and odd planes to the specific block and page.
+ * Alternatively, programs the flash to the wear area of the specified page.
+ */
+static int doc_write_seek(struct docg3 *docg3, int block0, int block1, int page,
+ int ofs)
+{
+ int ret = 0, sector;
+
+ doc_dbg("doc_write_seek(blocks=(%d,%d), page=%d, ofs=%d)\n",
+ block0, block1, page, ofs);
+
+ doc_set_reliable_mode(docg3);
+
+ if (ofs < 2 * DOC_LAYOUT_PAGE_SIZE) {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
+ doc_delay(docg3, 2);
+ } else {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
+ doc_delay(docg3, 2);
+ }
+
+ doc_flash_sequence(docg3, DOC_SEQ_PAGE_SETUP);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE3);
doc_delay(docg3, 2);
+ ret = doc_wait_ready(docg3);
+ if (ret)
+ goto out;
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+ sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+ doc_delay(docg3, 1);
out:
return ret;
}
+
/**
* doc_read_page_ecc_init - Initialize hardware ECC engine
* @docg3: the device
* @len: the number of bytes covered by the ECC (BCH covered)
*
* The function does initialize the hardware ECC engine to compute the Hamming
- * ECC (on 1 byte) and the BCH Syndroms (on 7 bytes).
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
*
* Return 0 if succeeded, -EIO on error
*/
return doc_wait_ready(docg3);
}
+/**
+ * doc_write_page_ecc_init - Initialize hardware BCH ECC engine
+ * @docg3: the device
+ * @len: the number of bytes covered by the ECC (BCH covered)
+ *
+ * The function does initialize the hardware ECC engine to compute the Hamming
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
+ *
+ * Return 0 if succeeded, -EIO on error
+ */
+static int doc_write_page_ecc_init(struct docg3 *docg3, int len)
+{
+ doc_writew(docg3, DOC_ECCCONF0_WRITE_MODE
+ | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
+ | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
+ DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+ doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return doc_wait_ready(docg3);
+}
+
+/**
+ * doc_ecc_disable - Disable Hamming and BCH ECC hardware calculator
+ * @docg3: the device
+ *
+ * Disables the hardware ECC generator and checker, for unchecked reads (as when
+ * reading OOB only or the write status byte).
+ */
+static void doc_ecc_disable(struct docg3 *docg3)
+{
+ doc_writew(docg3, DOC_ECCCONF0_READ_MODE, DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+}
+
+/**
+ * doc_hamming_ecc_init - Initialize hardware Hamming ECC engine
+ * @docg3: the device
+ * @nb_bytes: the number of bytes covered by the ECC (Hamming covered)
+ *
+ * This function programs the ECC hardware to compute the Hamming code on the
+ * last provided N bytes to the hardware generator.
+ */
+static void doc_hamming_ecc_init(struct docg3 *docg3, int nb_bytes)
+{
+ u8 ecc_conf1;
+
+ ecc_conf1 = doc_register_readb(docg3, DOC_ECCCONF1);
+ ecc_conf1 &= ~DOC_ECCCONF1_HAMMING_BITS_MASK;
+ ecc_conf1 |= (nb_bytes & DOC_ECCCONF1_HAMMING_BITS_MASK);
+ doc_writeb(docg3, ecc_conf1, DOC_ECCCONF1);
+}
+
+/**
+ * doc_ecc_bch_fix_data - Fix the data read from flash, if need be
+ * @docg3: the device
+ * @buf: the buffer of read data (512 + 7 + 1 bytes)
+ * @hwecc: the hardware calculated ECC.
+ * It's in fact recv_ecc ^ calc_ecc, where recv_ecc was read from OOB
+ * area data, and calc_ecc the ECC calculated by the hardware generator.
+ *
+ * Checks if the received data matches the ECC, and if an error is detected,
+ * tries to fix the bit flips (at most 4) in the buffer buf. As the docg3
+ * understands the (data, ecc, syndromes) in an inverted bit order compared to
+ * the BCH library, the function reverses the order of bits (i.e. bit7 and bit0,
+ * bit6 and bit1, ...) for all ECC data.
+ *
+ * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
+ * algorithm is used to decode this. However the hw operates on page
+ * data in a bit order that is the reverse of that of the bch alg,
+ * requiring that the bits be reversed on the result. Thanks to Ivan
+ * Djelic for his analysis.
+ *
+ * Returns number of fixed bits (0, 1, 2, 3, 4) or -EBADMSG if too many bit
+ * errors were detected and cannot be fixed.
+ */
+static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
+{
+ u8 ecc[DOC_ECC_BCH_SIZE];
+ int errorpos[DOC_ECC_BCH_T], i, numerrs;
+
+ for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
+ ecc[i] = bitrev8(hwecc[i]);
+ numerrs = decode_bch(docg3_bch, NULL, DOC_ECC_BCH_COVERED_BYTES,
+ NULL, ecc, NULL, errorpos);
+ BUG_ON(numerrs == -EINVAL);
+ if (numerrs < 0)
+ goto out;
+
+ for (i = 0; i < numerrs; i++)
+ errorpos[i] = (errorpos[i] & ~7) | (7 - (errorpos[i] & 7));
+ for (i = 0; i < numerrs; i++)
+ if (errorpos[i] < DOC_ECC_BCH_COVERED_BYTES*8)
+ /* error is located in data, correct it */
+ change_bit(errorpos[i], buf);
+out:
+ doc_dbg("doc_ecc_bch_fix_data: flipped %d bits\n", numerrs);
+ return numerrs;
+}
+
+
/**
* doc_read_page_prepare - Prepares reading data from a flash page
* @docg3: the device
}
/**
- * doc_get_hw_bch_syndroms - Get hardware calculated BCH syndroms
+ * doc_write_page_putbytes - Writes bytes into a prepared page
+ * @docg3: the device
+ * @len: the number of bytes to be written
+ * @buf: the buffer of input bytes
+ *
+ */
+static void doc_write_page_putbytes(struct docg3 *docg3, int len,
+ const u_char *buf)
+{
+ doc_write_data_area(docg3, buf, len);
+ doc_delay(docg3, 2);
+}
+
+/**
+ * doc_get_bch_hw_ecc - Get hardware calculated BCH ECC
* @docg3: the device
- * @syns: the array of 7 integers where the syndroms will be stored
+ * @hwecc: the array of 7 bytes where the hardware ECC will be stored
*/
-static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns)
+static void doc_get_bch_hw_ecc(struct docg3 *docg3, u8 *hwecc)
{
int i;
for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
- syns[i] = doc_register_readb(docg3, DOC_BCH_SYNDROM(i));
+ hwecc[i] = doc_register_readb(docg3, DOC_BCH_HW_ECC(i));
+}
+
+/**
+ * doc_page_finish - Ends reading/writing of a flash page
+ * @docg3: the device
+ */
+static void doc_page_finish(struct docg3 *docg3)
+{
+ doc_writeb(docg3, 0, DOC_DATAEND);
+ doc_delay(docg3, 2);
}
/**
*/
static void doc_read_page_finish(struct docg3 *docg3)
{
- doc_writeb(docg3, 0, DOC_DATAEND);
- doc_delay(docg3, 2);
+ doc_page_finish(docg3);
doc_set_device_id(docg3, 0);
}
* @block1: second plane block index calculated
* @page: page calculated
* @ofs: offset in page
+ * @reliable: 0 if docg3 in normal mode, 1 if docg3 in fast mode, 2 if docg3 in
+ * reliable mode.
+ *
+ * The calculation is based on the reliable/normal mode. In normal mode, the 64
+ * pages of a block are available. In reliable mode, as pages 2*n and 2*n+1 are
+ * clones, only 32 pages per block are available.
*/
static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
- int *ofs)
+ int *ofs, int reliable)
{
- uint sector;
+ uint sector, pages_biblock;
+
+ pages_biblock = DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ pages_biblock /= 2;
sector = from / DOC_LAYOUT_PAGE_SIZE;
- *block0 = sector / (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES)
- * DOC_LAYOUT_NBPLANES;
+ *block0 = sector / pages_biblock * DOC_LAYOUT_NBPLANES;
*block1 = *block0 + 1;
- *page = sector % (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES);
+ *page = sector % pages_biblock;
*page /= DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ *page *= 2;
if (sector % 2)
*ofs = DOC_LAYOUT_PAGE_OOB_SIZE;
else
}
/**
- * doc_read - Read bytes from flash
+ * doc_read_oob - Read out of band bytes from flash
* @mtd: the device
* @from: the offset from first block and first page, in bytes, aligned on page
* size
- * @len: the number of bytes to read (must be a multiple of 4)
- * @retlen: the number of bytes actually read
- * @buf: the filled in buffer
+ * @ops: the mtd oob structure
*
- * Reads flash memory pages. This function does not read the OOB chunk, but only
- * the page data.
+ * Reads flash memory OOB area of pages.
*
* Returns 0 if read successfull, of -EIO, -EINVAL if an error occured
*/
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int doc_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
{
struct docg3 *docg3 = mtd->priv;
- int block0, block1, page, readlen, ret, ofs = 0;
- int syn[DOC_ECC_BCH_SIZE], eccconf1;
- u8 oob[DOC_LAYOUT_OOB_SIZE];
+ int block0, block1, page, ret, ofs = 0;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen, nbdata, nboob;
+ u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ from, ops->mode, buf, len, oobbuf, ooblen);
+ if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % DOC_LAYOUT_OOB_SIZE) ||
+ (from % DOC_LAYOUT_PAGE_SIZE))
+ return -EINVAL;
ret = -EINVAL;
- doc_dbg("doc_read(from=%lld, len=%zu, buf=%p)\n", from, len, buf);
- if (from % DOC_LAYOUT_PAGE_SIZE)
- goto err;
- if (len % 4)
- goto err;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
+ calc_block_sector(from + len, &block0, &block1, &page, &ofs,
+ docg3->reliable);
if (block1 > docg3->max_block)
goto err;
- *retlen = 0;
+ ops->oobretlen = 0;
+ ops->retlen = 0;
ret = 0;
- readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
- while (!ret && len > 0) {
- readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+ while (!ret && (len > 0 || ooblen > 0)) {
+ calc_block_sector(from, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ nbdata = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+ nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
if (ret < 0)
goto err;
- ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_COVERED_BYTES);
+ ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
if (ret < 0)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, readlen, buf, 1);
- if (ret < readlen)
+ ret = doc_read_page_getbytes(docg3, nbdata, buf, 1);
+ if (ret < nbdata)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
- oob, 0);
- if (ret < DOC_LAYOUT_OOB_SIZE)
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata,
+ NULL, 0);
+ ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
+ if (ret < nboob)
goto err_in_read;
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
+ NULL, 0);
- *retlen += readlen;
- buf += readlen;
- len -= readlen;
-
- ofs ^= DOC_LAYOUT_PAGE_OOB_SIZE;
- if (ofs == 0)
- page += 2;
- if (page > DOC_ADDR_PAGE_MASK) {
- page = 0;
- block0 += 2;
- block1 += 2;
- }
-
- /*
- * There should be a BCH bitstream fixing algorithm here ...
- * By now, a page read failure is triggered by BCH error
- */
- doc_get_hw_bch_syndroms(docg3, syn);
+ doc_get_bch_hw_ecc(docg3, hwecc);
eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
- doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- oob[0], oob[1], oob[2], oob[3], oob[4],
- oob[5], oob[6]);
- doc_dbg("OOB - HAMMING: %02x\n", oob[7]);
- doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- oob[8], oob[9], oob[10], oob[11], oob[12],
- oob[13], oob[14]);
- doc_dbg("OOB - UNUSED: %02x\n", oob[15]);
+ if (nboob >= DOC_LAYOUT_OOB_SIZE) {
+ doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ oobbuf[0], oobbuf[1], oobbuf[2], oobbuf[3],
+ oobbuf[4], oobbuf[5], oobbuf[6]);
+ doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]);
+ doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ oobbuf[8], oobbuf[9], oobbuf[10], oobbuf[11],
+ oobbuf[12], oobbuf[13], oobbuf[14]);
+ doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]);
+ }
doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
- doc_dbg("ECC BCH syndrom: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- syn[0], syn[1], syn[2], syn[3], syn[4], syn[5], syn[6]);
-
- ret = -EBADMSG;
- if (block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) {
- if (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR)
- goto err_in_read;
- if (is_prot_seq_error(docg3))
- goto err_in_read;
+ doc_dbg("ECC HW_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ hwecc[0], hwecc[1], hwecc[2], hwecc[3], hwecc[4],
+ hwecc[5], hwecc[6]);
+
+ ret = -EIO;
+ if (is_prot_seq_error(docg3))
+ goto err_in_read;
+ ret = 0;
+ if ((block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) &&
+ (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) &&
+ (eccconf1 & DOC_ECCCONF1_PAGE_IS_WRITTEN) &&
+ (ops->mode != MTD_OPS_RAW) &&
+ (nbdata == DOC_LAYOUT_PAGE_SIZE)) {
+ ret = doc_ecc_bch_fix_data(docg3, buf, hwecc);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ ret = -EBADMSG;
+ }
+ if (ret > 0) {
+ mtd->ecc_stats.corrected += ret;
+ ret = -EUCLEAN;
+ }
}
+
doc_read_page_finish(docg3);
+ ops->retlen += nbdata;
+ ops->oobretlen += nboob;
+ buf += nbdata;
+ oobbuf += nboob;
+ len -= nbdata;
+ ooblen -= nboob;
+ from += DOC_LAYOUT_PAGE_SIZE;
}
- return 0;
+ return ret;
err_in_read:
doc_read_page_finish(docg3);
err:
}
/**
- * doc_read_oob - Read out of band bytes from flash
+ * doc_read - Read bytes from flash
* @mtd: the device
* @from: the offset from first block and first page, in bytes, aligned on page
* size
- * @ops: the mtd oob structure
+ * @len: the number of bytes to read (must be a multiple of 4)
+ * @retlen: the number of bytes actually read
+ * @buf: the filled in buffer
*
- * Reads flash memory OOB area of pages.
+ * Reads flash memory pages. This function does not read the OOB chunk, but only
+ * the page data.
*
* Returns 0 if read successfull, of -EIO, -EINVAL if an error occured
*/
-static int doc_read_oob(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
+static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
{
- struct docg3 *docg3 = mtd->priv;
- int block0, block1, page, ofs, ret;
- u8 *buf = ops->oobbuf;
- size_t len = ops->ooblen;
-
- doc_dbg("doc_read_oob(from=%lld, buf=%p, len=%zu)\n", from, buf, len);
- if (len != DOC_LAYOUT_OOB_SIZE)
- return -EINVAL;
-
- switch (ops->mode) {
- case MTD_OPS_PLACE_OOB:
- buf += ops->ooboffs;
- break;
- default:
- break;
- }
+ struct mtd_oob_ops ops;
+ size_t ret;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
- if (block1 > docg3->max_block)
- return -EINVAL;
-
- ret = doc_read_page_prepare(docg3, block0, block1, page,
- ofs + DOC_LAYOUT_PAGE_SIZE);
- if (!ret)
- ret = doc_read_page_ecc_init(docg3, DOC_LAYOUT_OOB_SIZE);
- if (!ret)
- ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
- buf, 1);
- doc_read_page_finish(docg3);
+ memset(&ops, 0, sizeof(ops));
+ ops.datbuf = buf;
+ ops.len = len;
+ ops.mode = MTD_OPS_AUTO_OOB;
- if (ret > 0)
- ops->oobretlen = ret;
- else
- ops->oobretlen = 0;
- return (ret > 0) ? 0 : ret;
+ ret = doc_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ return ret;
}
static int doc_reload_bbt(struct docg3 *docg3)
struct docg3 *docg3 = mtd->priv;
int block0, block1, page, ofs, is_good;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
+ calc_block_sector(from, &block0, &block1, &page, &ofs,
+ docg3->reliable);
doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n",
from, block0, block1, page, ofs);
return !is_good;
}
+#if 0
/**
* doc_get_erase_count - Get block erase count
* @docg3: the device
doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
if (from % DOC_LAYOUT_PAGE_SIZE)
return -EINVAL;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
+ calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable);
if (block1 > docg3->max_block)
return -EINVAL;
return max(plane1_erase_count, plane2_erase_count);
}
+#endif
-/*
- * Debug sysfs entries
+/**
+ * doc_get_op_status - get erase/write operation status
+ * @docg3: the device
+ *
+ * Queries the status from the chip, and returns it
+ *
+ * Returns the status (bits DOC_PLANES_STATUS_*)
*/
-static int dbg_flashctrl_show(struct seq_file *s, void *p)
+static int doc_get_op_status(struct docg3 *docg3)
{
- struct docg3 *docg3 = (struct docg3 *)s->private;
+ u8 status;
- int pos = 0;
- u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ doc_flash_sequence(docg3, DOC_SEQ_PLANES_STATUS);
+ doc_flash_command(docg3, DOC_CMD_PLANES_STATUS);
+ doc_delay(docg3, 5);
- pos += seq_printf(s,
- "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
- fctrl,
- fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
- fctrl & DOC_CTRL_CE ? "active" : "inactive",
- fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
- fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
- fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
- return pos;
+ doc_ecc_disable(docg3);
+ doc_read_data_area(docg3, &status, 1, 1);
+ return status;
+}
+
+/**
+ * doc_write_erase_wait_status - wait for write or erase completion
+ * @docg3: the device
+ *
+ * Wait for the chip to be ready again after an erase or write operation, and
+ * check the erase/write status.
+ *
+ * Returns 0 if the erase/write succeeded, -EIO on a plane failure, -EAGAIN if
+ * the chip did not become ready in time
+ */
+static int doc_write_erase_wait_status(struct docg3 *docg3)
+{
+ int status, ret = 0;
+
+ if (!doc_is_ready(docg3))
+ usleep_range(3000, 3000);
+ if (!doc_is_ready(docg3)) {
+ doc_dbg("Timeout reached and the chip is still not ready\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ status = doc_get_op_status(docg3);
+ if (status & DOC_PLANES_STATUS_FAIL) {
+ doc_dbg("Erase/Write failed on (a) plane(s), status = %x\n",
+ status);
+ ret = -EIO;
+ }
+
+out:
+ doc_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_erase_block - Erase a couple of blocks
+ * @docg3: the device
+ * @block0: the first block to erase (leftmost plane)
+ * @block1: the second block to erase (rightmost plane)
+ *
+ * Erase both blocks, and return operation status
+ *
+ * Returns 0 if erase successful, -EIO on an erase issue, -EAGAIN if the chip
+ * was not ready for too long
+ */
+static int doc_erase_block(struct docg3 *docg3, int block0, int block1)
+{
+ int ret, sector;
+
+ doc_dbg("doc_erase_block(blocks=(%d,%d))\n", block0, block1);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ return -EIO;
+
+ doc_set_reliable_mode(docg3);
+ doc_flash_sequence(docg3, DOC_SEQ_ERASE);
+
+ sector = block0 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ sector = block1 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ doc_delay(docg3, 1);
+
+ doc_flash_command(docg3, DOC_CMD_ERASECYCLE2);
+ doc_delay(docg3, 2);
+
+ if (is_prot_seq_error(docg3)) {
+ doc_err("Erase blocks %d,%d error\n", block0, block1);
+ return -EIO;
+ }
+
+ return doc_write_erase_wait_status(docg3);
+}
+
+/**
+ * doc_erase - Erase a portion of the chip
+ * @mtd: the device
+ * @info: the erase info
+ *
+ * Erase a range of contiguous blocks, by pairs, as an "mtd" page of 1024 bytes
+ * is split into 2 pages of 512 bytes on 2 contiguous blocks.
+ *
+ * Returns 0 if erase successful, -EINVAL on an addressing error, -EIO on an
+ * erase issue
+ */
+static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
+{
+ struct docg3 *docg3 = mtd->priv;
+ uint64_t len;
+ int block0, block1, page, ret, ofs = 0;
+
+ doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len);
+ doc_set_device_id(docg3, docg3->device_id);
+
+ info->state = MTD_ERASE_PENDING;
+ calc_block_sector(info->addr + info->len, &block0, &block1, &page,
+ &ofs, docg3->reliable);
+ ret = -EINVAL;
+ if (block1 > docg3->max_block || page || ofs)
+ goto reset_err;
+
+ ret = 0;
+ calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ doc_set_reliable_mode(docg3);
+ for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
+ info->state = MTD_ERASING;
+ ret = doc_erase_block(docg3, block0, block1);
+ block0 += 2;
+ block1 += 2;
+ }
+
+ if (ret)
+ goto reset_err;
+
+ info->state = MTD_ERASE_DONE;
+ return 0;
+
+reset_err:
+ info->state = MTD_ERASE_FAILED;
+ return ret;
+}
+
+/**
+ * doc_write_page - Write a single page to the chip
+ * @docg3: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @buf: buffer to get bytes from
+ * @oob: buffer to get out of band bytes from (can be NULL if no OOB should be
+ * written)
+ * @autoecc: if 0, all 16 bytes from OOB are taken, regardless of HW Hamming or
+ * BCH computations. If 1, only bytes 0-7 and byte 15 are taken,
+ * the remaining ones being filled with hardware Hamming and BCH
+ * computations. Its value is not meaningful if oob == NULL.
+ *
+ * Write one full page (i.e. 1 page split on two planes), of 512 bytes, with the
+ * OOB data. The OOB ECC is automatically computed by the hardware Hamming and
+ * BCH generator if autoecc is not null.
+ *
+ * Returns 0 if write successful, -EIO if write error, -EAGAIN if timeout
+ */
+static int doc_write_page(struct docg3 *docg3, loff_t to, const u_char *buf,
+ const u_char *oob, int autoecc)
+{
+ int block0, block1, page, ret, ofs = 0;
+ u8 hwecc[DOC_ECC_BCH_SIZE], hamming;
+
+ doc_dbg("doc_write_page(to=%lld)\n", to);
+ calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable);
+
+ doc_set_device_id(docg3, docg3->device_id);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ goto err;
+
+ /* Program the flash address block and page */
+ ret = doc_write_seek(docg3, block0, block1, page, ofs);
+ if (ret)
+ goto err;
+
+ doc_write_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_PAGE_SIZE, buf);
+
+ if (oob && autoecc) {
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ, oob);
+ doc_delay(docg3, 2);
+ oob += DOC_LAYOUT_OOB_UNUSED_OFS;
+
+ hamming = doc_register_readb(docg3, DOC_HAMMINGPARITY);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_HAMMING_SZ,
+ &hamming);
+ doc_delay(docg3, 2);
+
+ doc_get_bch_hw_ecc(docg3, hwecc);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_BCH_SZ, hwecc);
+ doc_delay(docg3, 2);
+
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_UNUSED_SZ, oob);
+ }
+ if (oob && !autoecc)
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_SIZE, oob);
+
+ doc_delay(docg3, 2);
+ doc_page_finish(docg3);
+ doc_delay(docg3, 2);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE2);
+ doc_delay(docg3, 2);
+
+ /*
+ * The wait status will perform another doc_page_finish() call, but that
+ * seems to please the docg3, so leave it.
+ */
+ ret = doc_write_erase_wait_status(docg3);
+ return ret;
+err:
+ doc_read_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_guess_autoecc - Guess autoecc mode from mtd_oob_ops
+ * @ops: the oob operations
+ *
+ * Returns 0 or 1 if success, -EINVAL if invalid oob mode
+ */
+static int doc_guess_autoecc(struct mtd_oob_ops *ops)
+{
+ int autoecc;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ autoecc = 1;
+ break;
+ case MTD_OPS_RAW:
+ autoecc = 0;
+ break;
+ default:
+ autoecc = -EINVAL;
+ }
+ return autoecc;
+}
+
+/**
+ * doc_fill_autooob - Fill a 16 bytes OOB from 8 non-ECC bytes
+ * @dst: the target 16 bytes OOB buffer
+ * @oobsrc: the source 8 bytes non-ECC OOB buffer
+ *
+ */
+static void doc_fill_autooob(u8 *dst, u8 *oobsrc)
+{
+ memcpy(dst, oobsrc, DOC_LAYOUT_OOB_PAGEINFO_SZ);
+ dst[DOC_LAYOUT_OOB_UNUSED_OFS] = oobsrc[DOC_LAYOUT_OOB_PAGEINFO_SZ];
+}
+
+/**
+ * doc_backup_oob - Backup OOB into docg3 structure
+ * @docg3: the device
+ * @to: the page offset in the chip
+ * @ops: the OOB size and buffer
+ *
+ * As the docg3 should write a page with its OOB in one pass, and some userland
+ * applications do write_oob() to setup the OOB and then write(), store the OOB
+ * into a temporary storage. This is very dangerous, as 2 concurrent
+ * applications could each store an OOB and then write their pages (which would
+ * result in one of them having its OOB corrupted).
+ *
+ * The only reliable way would be for userland to call doc_write_oob() with both
+ * the page data _and_ the OOB area.
+ *
+ * Returns 0 if success, -EINVAL if ops content invalid
+ */
+static int doc_backup_oob(struct docg3 *docg3, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ooblen = ops->ooblen, autoecc;
+
+ if (ooblen != DOC_LAYOUT_OOB_SIZE)
+ return -EINVAL;
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ docg3->oob_write_ofs = to;
+ docg3->oob_autoecc = autoecc;
+ if (ops->mode == MTD_OPS_AUTO_OOB) {
+ doc_fill_autooob(docg3->oob_write_buf, ops->oobbuf);
+ ops->oobretlen = 8;
+ } else {
+ memcpy(docg3->oob_write_buf, ops->oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ops->oobretlen = DOC_LAYOUT_OOB_SIZE;
+ }
+ return 0;
+}
+
+/**
+ * doc_write_oob - Write out of band bytes to flash
+ * @mtd: the device
+ * @ofs: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @ops: the mtd oob structure
+ *
+ * Either write OOB data into a temporary buffer, for the subsequent page
+ * write. The provided OOB should be 16 bytes long. If a data buffer is provided
+ * as well, issue the page write.
+ * Or provide data without OOB, and then an all-zeroed OOB will be used (ECC will
+ * still be filled in if asked for).
+ *
+ * Returns 0 if successful, -EINVAL if the data or OOB lengths are invalid
+ */
+static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
+ struct mtd_oob_ops *ops)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int block0, block1, page, ret, pofs = 0, autoecc, oobdelta;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen;
+ u8 oob[DOC_LAYOUT_OOB_SIZE];
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_write_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ ofs, ops->mode, buf, len, oobbuf, ooblen);
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ oobdelta = mtd->oobsize;
+ break;
+ case MTD_OPS_AUTO_OOB:
+ oobdelta = mtd->ecclayout->oobavail;
+ break;
+ default:
+ oobdelta = 0;
+ }
+ if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
+ (ofs % DOC_LAYOUT_PAGE_SIZE))
+ return -EINVAL;
+ if (len && ooblen &&
+ (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
+ return -EINVAL;
+
+ ret = -EINVAL;
+ calc_block_sector(ofs + len, &block0, &block1, &page, &pofs,
+ docg3->reliable);
+ if (block1 > docg3->max_block)
+ goto err;
+
+ ops->oobretlen = 0;
+ ops->retlen = 0;
+ ret = 0;
+ if (len == 0 && ooblen == 0)
+ return -EINVAL;
+ if (len == 0 && ooblen > 0)
+ return doc_backup_oob(docg3, ofs, ops);
+
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ while (!ret && len > 0) {
+ memset(oob, 0, sizeof(oob));
+ if (ofs == docg3->oob_write_ofs)
+ memcpy(oob, docg3->oob_write_buf, DOC_LAYOUT_OOB_SIZE);
+ else if (ooblen > 0 && ops->mode == MTD_OPS_AUTO_OOB)
+ doc_fill_autooob(oob, oobbuf);
+ else if (ooblen > 0)
+ memcpy(oob, oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ret = doc_write_page(docg3, ofs, buf, oob, autoecc);
+
+ ofs += DOC_LAYOUT_PAGE_SIZE;
+ len -= DOC_LAYOUT_PAGE_SIZE;
+ buf += DOC_LAYOUT_PAGE_SIZE;
+ if (ooblen) {
+ oobbuf += oobdelta;
+ ooblen -= oobdelta;
+ ops->oobretlen += oobdelta;
+ }
+ ops->retlen += DOC_LAYOUT_PAGE_SIZE;
+ }
+err:
+ doc_set_device_id(docg3, 0);
+ return ret;
+}
+
+/**
+ * doc_write - Write a buffer to the chip
+ * @mtd: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @len: the number of bytes to write (must be a full page size, ie. 512)
+ * @retlen: the number of bytes actually written (0 or 512)
+ * @buf: the buffer to get bytes from
+ *
+ * Writes data to the chip.
+ *
+ * Returns 0 if write successful, -EIO if write error
+ */
+static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int ret;
+ struct mtd_oob_ops ops;
+
+ doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
+ ops.datbuf = (char *)buf;
+ ops.len = len;
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.oobbuf = NULL;
+ ops.ooblen = 0;
+ ops.ooboffs = 0;
+
+ ret = doc_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+static struct docg3 *sysfs_dev2docg3(struct device *dev,
+ struct device_attribute *attr)
+{
+ int floor;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+
+ floor = attr->attr.name[1] - '0';
+ if (floor < 0 || floor >= DOC_MAX_NBFLOORS)
+ return NULL;
+ else
+ return docg3_floors[floor]->priv;
+}
+
+static ssize_t dps0_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps0;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ doc_set_device_id(docg3, 0);
+
+ return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps1_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps1;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ doc_set_device_id(docg3, 0);
+
+ return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps0_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
+ doc_set_device_id(docg3, 0);
+ return count;
+}
+
+static ssize_t dps1_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
+ doc_set_device_id(docg3, 0);
+ return count;
+}
+
+#define FLOOR_SYSFS(id) { \
+ __ATTR(f##id##_dps0_is_keylocked, S_IRUGO, dps0_is_key_locked, NULL), \
+ __ATTR(f##id##_dps1_is_keylocked, S_IRUGO, dps1_is_key_locked, NULL), \
+ __ATTR(f##id##_dps0_protection_key, S_IWUGO, NULL, dps0_insert_key), \
+ __ATTR(f##id##_dps1_protection_key, S_IWUGO, NULL, dps1_insert_key), \
+}
+
+static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
+ FLOOR_SYSFS(0), FLOOR_SYSFS(1), FLOOR_SYSFS(2), FLOOR_SYSFS(3)
+};
+
+static int doc_register_sysfs(struct platform_device *pdev,
+ struct mtd_info **floors)
+{
+ int ret = 0, floor, i = 0;
+ struct device *dev = &pdev->dev;
+
+ for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS && floors[floor];
+ floor++)
+ for (i = 0; !ret && i < 4; i++)
+ ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
+ if (!ret)
+ return 0;
+ do {
+ while (--i >= 0)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+ i = 4;
+ } while (--floor >= 0);
+ return ret;
+}
+
+static void doc_unregister_sysfs(struct platform_device *pdev,
+ struct mtd_info **floors)
+{
+ struct device *dev = &pdev->dev;
+ int floor, i;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS && floors[floor];
+ floor++)
+ for (i = 0; i < 4; i++)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+}
+
+/*
+ * Debug sysfs entries
+ */
+static int dbg_flashctrl_show(struct seq_file *s, void *p)
+{
+ struct docg3 *docg3 = (struct docg3 *)s->private;
+
+ int pos = 0;
+ u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+
+ pos += seq_printf(s,
+ "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
+ fctrl,
+ fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
+ fctrl & DOC_CTRL_CE ? "active" : "inactive",
+ fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
+ fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
+ fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
+ return pos;
}
DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show);
{
struct docg3 *docg3 = (struct docg3 *)s->private;
int pos = 0;
- int protect = doc_register_readb(docg3, DOC_PROTECTION);
- int dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
- int dps0_low = doc_register_readb(docg3, DOC_DPS0_ADDRLOW);
- int dps0_high = doc_register_readb(docg3, DOC_DPS0_ADDRHIGH);
- int dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
- int dps1_low = doc_register_readb(docg3, DOC_DPS1_ADDRLOW);
- int dps1_high = doc_register_readb(docg3, DOC_DPS1_ADDRHIGH);
+ int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
+
+ protect = doc_register_readb(docg3, DOC_PROTECTION);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
+ dps0_high = doc_register_readw(docg3, DOC_DPS0_ADDRHIGH);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
+ dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
pos += seq_printf(s, "Protection = 0x%02x (",
protect);
cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);
+ docg3->reliable = reliable_mode;
switch (chip_id) {
case DOC_CHIPID_G3:
- mtd->name = "DiskOnChip G3";
+ mtd->name = kasprintf(GFP_KERNEL, "DiskOnChip G3 floor %d",
+ docg3->device_id);
docg3->max_block = 2047;
break;
}
mtd->type = MTD_NANDFLASH;
- /*
- * Once write methods are added, the correct flags will be set.
- * mtd->flags = MTD_CAP_NANDFLASH;
- */
- mtd->flags = MTD_CAP_ROM;
+ mtd->flags = MTD_CAP_NANDFLASH;
mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
+ if (docg3->reliable == 2)
+ mtd->size /= 2;
mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
+ if (docg3->reliable == 2)
+ mtd->erasesize /= 2;
mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
mtd->owner = THIS_MODULE;
- mtd->erase = NULL;
- mtd->point = NULL;
- mtd->unpoint = NULL;
+ mtd->erase = doc_erase;
mtd->read = doc_read;
- mtd->write = NULL;
+ mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
- mtd->write_oob = NULL;
- mtd->sync = NULL;
+ mtd->write_oob = doc_write_oob;
mtd->block_isbad = doc_block_isbad;
+ mtd->ecclayout = &docg3_oobinfo;
}
/**
- * doc_probe - Probe the IO space for a DiskOnChip G3 chip
- * @pdev: platform device
+ * doc_probe_device - Check if a device is available
+ * @base: the io space where the device is probed
+ * @floor: the floor of the probed device
+ * @dev: the device
*
- * Probes for a G3 chip at the specified IO space in the platform data
- * ressources.
+ * Checks whether a device is available at the specified IO range and floor.
*
- * Returns 0 on success, -ENOMEM, -ENXIO on error
+ * Returns an mtd_info struct if there is a device, ENODEV if none found, ENOMEM
+ * if a memory allocation failed. If floor 0 is checked, a reset of the ASIC is
+ * launched.
*/
-static int __init docg3_probe(struct platform_device *pdev)
+static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
+ struct device *dev)
{
- struct device *dev = &pdev->dev;
- struct docg3 *docg3;
- struct mtd_info *mtd;
- struct resource *ress;
int ret, bbt_nbpages;
u16 chip_id, chip_id_inv;
+ struct docg3 *docg3;
+ struct mtd_info *mtd;
ret = -ENOMEM;
docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL);
if (!mtd)
goto nomem2;
mtd->priv = docg3;
+ bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
+ 8 * DOC_LAYOUT_PAGE_SIZE);
+ docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
+ if (!docg3->bbt)
+ goto nomem3;
- ret = -ENXIO;
- ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!ress) {
- dev_err(dev, "No I/O memory resource defined\n");
- goto noress;
- }
- docg3->base = ioremap(ress->start, DOC_IOSPACE_SIZE);
-
- docg3->dev = &pdev->dev;
- docg3->device_id = 0;
+ docg3->dev = dev;
+ docg3->device_id = floor;
+ docg3->base = base;
doc_set_device_id(docg3, docg3->device_id);
- doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
+ if (!floor)
+ doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL);
chip_id = doc_register_readw(docg3, DOC_CHIPID);
chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV);
- ret = -ENODEV;
+ ret = 0;
if (chip_id != (u16)(~chip_id_inv)) {
- doc_info("No device found at IO addr %p\n",
- (void *)ress->start);
- goto nochipfound;
+ goto nomem3;
}
switch (chip_id) {
case DOC_CHIPID_G3:
- doc_info("Found a G3 DiskOnChip at addr %p\n",
- (void *)ress->start);
+ doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
+ base, floor);
break;
default:
doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
- goto nochipfound;
+ goto nomem3;
}
doc_set_driver_info(chip_id, mtd);
- platform_set_drvdata(pdev, mtd);
- ret = -ENOMEM;
- bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
- 8 * DOC_LAYOUT_PAGE_SIZE);
- docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
- if (!docg3->bbt)
- goto nochipfound;
+ doc_hamming_ecc_init(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ);
doc_reload_bbt(docg3);
+ return mtd;
- ret = mtd_device_parse_register(mtd, part_probes,
- NULL, NULL, 0);
- if (ret)
- goto register_error;
+nomem3:
+ kfree(mtd);
+nomem2:
+ kfree(docg3);
+nomem1:
+ return ERR_PTR(ret);
+}
- doc_dbg_register(docg3);
- return 0;
+/**
+ * doc_release_device - Release a docg3 floor
+ * @mtd: the device
+ */
+static void doc_release_device(struct mtd_info *mtd)
+{
+ struct docg3 *docg3 = mtd->priv;
-register_error:
+ mtd_device_unregister(mtd);
kfree(docg3->bbt);
-nochipfound:
- iounmap(docg3->base);
-noress:
+ kfree(docg3);
+ kfree(mtd->name);
kfree(mtd);
+}
+
+/**
+ * docg3_resume - Awakens the docg3 floors
+ * @pdev: platform device
+ *
+ * Returns 0 (always successful)
+ */
+static int docg3_resume(struct platform_device *pdev)
+{
+ int i;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+
+ docg3_floors = platform_get_drvdata(pdev);
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+
+ doc_dbg("docg3_resume()\n");
+ for (i = 0; i < 12; i++)
+ doc_readb(docg3, DOC_IOSPACE_IPL);
+ return 0;
+}
+
+/**
+ * docg3_suspend - Put the docg3 floors into low power mode
+ * @pdev: platform device
+ * @state: power state
+ *
+ * Shuts off most of the docg3 circuitry to lower power consumption.
+ *
+ * Returns 0 if the suspend succeeded, -EIO if the chip refused to suspend
+ */
+static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int floor, i;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+ u8 ctrl, pwr_down;
+
+ docg3_floors = platform_get_drvdata(pdev);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = docg3_floors[floor];
+ if (!mtd)
+ continue;
+ docg3 = mtd->priv;
+
+ doc_writeb(docg3, floor, DOC_DEVICESELECT);
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ ctrl &= ~DOC_CTRL_VIOLATION & ~DOC_CTRL_CE;
+ doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
+
+ for (i = 0; i < 10; i++) {
+ usleep_range(3000, 4000);
+ pwr_down = doc_register_readb(docg3, DOC_POWERMODE);
+ if (pwr_down & DOC_POWERDOWN_READY)
+ break;
+ }
+ if (pwr_down & DOC_POWERDOWN_READY) {
+ doc_dbg("docg3_suspend(): floor %d powerdown ok\n",
+ floor);
+ } else {
+ doc_err("docg3_suspend(): floor %d powerdown failed\n",
+ floor);
+ return -EIO;
+ }
+ }
+
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+ doc_set_asic_mode(docg3, DOC_ASICMODE_POWERDOWN);
+ return 0;
+}
+
+/**
+ * doc_probe - Probe the IO space for a DiskOnChip G3 chip
+ * @pdev: platform device
+ *
+ * Probes for a G3 chip at the specified IO space in the platform data
+ * resources. Floor 0 must be available.
+ *
+ * Returns 0 on success, -ENOMEM, -ENXIO on error
+ */
+static int __init docg3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtd_info *mtd;
+ struct resource *ress;
+ void __iomem *base;
+ int ret, floor, found = 0;
+ struct mtd_info **docg3_floors;
+
+ ret = -ENXIO;
+ ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ress) {
+ dev_err(dev, "No I/O memory resource defined\n");
+ goto noress;
+ }
+ base = ioremap(ress->start, DOC_IOSPACE_SIZE);
+
+ ret = -ENOMEM;
+ docg3_floors = kzalloc(sizeof(*docg3_floors) * DOC_MAX_NBFLOORS,
+ GFP_KERNEL);
+ if (!docg3_floors)
+ goto nomem1;
+ docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+ DOC_ECC_BCH_PRIMPOLY);
+ if (!docg3_bch)
+ goto nomem2;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = doc_probe_device(base, floor, dev);
+ if (IS_ERR(mtd)) {
+ ret = PTR_ERR(mtd);
+ goto err_probe;
+ }
+ if (!mtd) {
+ if (floor == 0)
+ goto notfound;
+ else
+ continue;
+ }
+ docg3_floors[floor] = mtd;
+ ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
+ 0);
+ if (ret)
+ goto err_probe;
+ found++;
+ }
+
+ ret = doc_register_sysfs(pdev, docg3_floors);
+ if (ret)
+ goto err_probe;
+ if (!found)
+ goto notfound;
+
+ platform_set_drvdata(pdev, docg3_floors);
+ doc_dbg_register(docg3_floors[0]->priv);
+ return 0;
+
+notfound:
+ ret = -ENODEV;
+ dev_info(dev, "No supported DiskOnChip found\n");
+err_probe:
+ free_bch(docg3_bch);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (docg3_floors[floor])
+ doc_release_device(docg3_floors[floor]);
nomem2:
- kfree(docg3);
+ kfree(docg3_floors);
nomem1:
+ iounmap(base);
+noress:
return ret;
}
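
For context, a minimal sketch of the board-side counterpart that docg3_probe() expects: a "docg3" platform device carrying one IORESOURCE_MEM resource. The base address 0x40000000 and the 0x2000 length below are illustrative placeholders, not values from any real board; the driver ioremaps DOC_IOSPACE_SIZE bytes from the resource start on its own.

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

/* Hypothetical board support sketch: registers a "docg3" platform device
 * so that docg3_probe() finds an IORESOURCE_MEM resource to ioremap(). */
static struct resource docg3_resources[] = {
	{
		.start	= 0x40000000,			/* placeholder base address */
		.end	= 0x40000000 + 0x2000 - 1,	/* placeholder length */
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device docg3_device = {
	.name		= "docg3",	/* matches the platform_driver name */
	.id		= -1,
	.resource	= docg3_resources,
	.num_resources	= ARRAY_SIZE(docg3_resources),
};

static int __init board_docg3_init(void)
{
	return platform_device_register(&docg3_device);
}
device_initcall(board_docg3_init);

With such a device registered, docg3_probe() walks the four possible floors with doc_probe_device() and registers one mtd_info per floor found.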
*/
static int __exit docg3_release(struct platform_device *pdev)
{
- struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct docg3 *docg3 = mtd->priv;
+ struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+ struct docg3 *docg3 = docg3_floors[0]->priv;
+ void __iomem *base = docg3->base;
+ int floor;
+ doc_unregister_sysfs(pdev, docg3_floors);
doc_dbg_unregister(docg3);
- mtd_device_unregister(mtd);
- iounmap(docg3->base);
- kfree(docg3->bbt);
- kfree(docg3);
- kfree(mtd);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (docg3_floors[floor])
+ doc_release_device(docg3_floors[floor]);
+
+ kfree(docg3_floors);
+ free_bch(docg3_bch);
+ iounmap(base);
return 0;
}
.name = "docg3",
.owner = THIS_MODULE,
},
+ .suspend = docg3_suspend,
+ .resume = docg3_resume,
.remove = __exit_p(docg3_release),
};
#define DOC_LAYOUT_WEAR_OFFSET (DOC_LAYOUT_PAGE_OOB_SIZE * 2)
#define DOC_LAYOUT_BLOCK_SIZE \
(DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE)
+
+/*
+ * ECC related constants
+ */
+#define DOC_ECC_BCH_M 14
+#define DOC_ECC_BCH_T 4
+#define DOC_ECC_BCH_PRIMPOLY 0x4443
#define DOC_ECC_BCH_SIZE 7
#define DOC_ECC_BCH_COVERED_BYTES \
(DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ + \
- DOC_LAYOUT_OOB_HAMMING_SZ + DOC_LAYOUT_OOB_BCH_SZ)
+ DOC_LAYOUT_OOB_HAMMING_SZ)
+#define DOC_ECC_BCH_TOTAL_BYTES \
+ (DOC_ECC_BCH_COVERED_BYTES + DOC_LAYOUT_OOB_BCH_SZ)
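
A quick sanity check on these new constants (my own arithmetic, not part of the patch): a t-error-correcting BCH code over GF(2^m) needs at most m*t parity bits, and that bound is met exactly here, so

	parity bits  = DOC_ECC_BCH_M * DOC_ECC_BCH_T = 14 * 4 = 56
	parity bytes = DIV_ROUND_UP(56, 8) = 7

which matches DOC_ECC_BCH_SIZE, the number of BCH bytes kept per page, and is what init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T, DOC_ECC_BCH_PRIMPOLY) computes when docg3_probe() sets up docg3_bch.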
/*
* Blocks distribution
#define DOC_CHIPID_G3 0x200
#define DOC_ERASE_MARK 0xaa
+#define DOC_MAX_NBFLOORS 4
/*
* Flash registers
*/
#define DOC_ECCCONF1 0x1042
#define DOC_ECCPRESET 0x1044
#define DOC_HAMMINGPARITY 0x1046
-#define DOC_BCH_SYNDROM(idx) (0x1048 + (idx << 1))
+#define DOC_BCH_HW_ECC(idx) (0x1048 + idx)
#define DOC_PROTECTION 0x1056
+#define DOC_DPS0_KEY 0x105c
+#define DOC_DPS1_KEY 0x105e
#define DOC_DPS0_ADDRLOW 0x1060
#define DOC_DPS0_ADDRHIGH 0x1062
#define DOC_DPS1_ADDRLOW 0x1064
#define DOC_ASICMODECONFIRM 0x1072
#define DOC_CHIPID_INV 0x1074
+#define DOC_POWERMODE 0x107c
/*
* Flash sequences
*/
#define DOC_SEQ_RESET 0x00
#define DOC_SEQ_PAGE_SIZE_532 0x03
-#define DOC_SEQ_SET_MODE 0x09
+#define DOC_SEQ_SET_FASTMODE 0x05
+#define DOC_SEQ_SET_RELIABLEMODE 0x09
#define DOC_SEQ_READ 0x12
#define DOC_SEQ_SET_PLANE1 0x0e
#define DOC_SEQ_SET_PLANE2 0x10
#define DOC_SEQ_PAGE_SETUP 0x1d
+#define DOC_SEQ_ERASE 0x27
+#define DOC_SEQ_PLANES_STATUS 0x31
/*
* Flash commands
#define DOC_CMD_PROG_BLOCK_ADDR 0x60
#define DOC_CMD_PROG_CYCLE1 0x80
#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOC_CMD_PROG_CYCLE3 0x11
#define DOC_CMD_ERASECYCLE2 0xd0
+#define DOC_CMD_READ_STATUS 0x70
+#define DOC_CMD_PLANES_STATUS 0x71
#define DOC_CMD_RELIABLE_MODE 0x22
#define DOC_CMD_FAST_MODE 0xa2
/*
* Flash register : DOC_ECCCONF0
*/
+#define DOC_ECCCONF0_WRITE_MODE 0x0000
#define DOC_ECCCONF0_READ_MODE 0x8000
#define DOC_ECCCONF0_AUTO_ECC_ENABLE 0x4000
#define DOC_ECCCONF0_HAMMING_ENABLE 0x1000
*/
#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
#define DOC_ECCCONF1_UNKOWN1 0x40
-#define DOC_ECCCONF1_UNKOWN2 0x20
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
#define DOC_ECCCONF1_UNKOWN3 0x10
#define DOC_ECCCONF1_HAMMING_BITS_MASK 0x0f
#define DOC_READADDR_ONE_BYTE 0x4000
#define DOC_READADDR_ADDR_MASK 0x1fff
+/*
+ * Flash register : DOC_POWERMODE
+ */
+#define DOC_POWERDOWN_READY 0x80
+
+/*
+ * Status of erase and write operation
+ */
+#define DOC_PLANES_STATUS_FAIL 0x01
+#define DOC_PLANES_STATUS_PLANE0_KO 0x02
+#define DOC_PLANES_STATUS_PLANE1_KO 0x04
+
+/*
+ * DPS key management
+ *
+ * Each floor of docg3 has 2 protection areas: DPS0 and DPS1. These areas span
+ * across block boundaries, and define whether these blocks can be read or
+ * written.
+ * The definition is dynamically stored in page 0 of blocks (2,3) for DPS0, and
+ * page 0 of blocks (4,5) for DPS1.
+ */
+#define DOC_LAYOUT_DPS_KEY_LENGTH 8
+
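
To make the DPS key path above concrete, here is a hedged user-space sketch (not part of the patch): it writes the 8 raw key bytes of floor 0, DPS0 through the sysfs attribute created by FLOOR_SYSFS() and reads the lock state back. The /sys/devices/platform/docg3/ location and the all-zeroes key are assumptions for illustration only.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* DOC_LAYOUT_DPS_KEY_LENGTH is 8: the key must be sent as one exact
	 * 8-byte write of raw bytes, with no trailing newline. */
	static const char key[] = "00000000";	/* example key only */
	char status[4] = "";
	int fd;

	fd = open("/sys/devices/platform/docg3/f0_dps0_protection_key", O_WRONLY);
	if (fd < 0 || write(fd, key, 8) != 8)
		perror("writing protection key");
	if (fd >= 0)
		close(fd);

	fd = open("/sys/devices/platform/docg3/f0_dps0_is_keylocked", O_RDONLY);
	if (fd >= 0 && read(fd, status, sizeof(status) - 1) > 0)
		printf("f0_dps0_is_keylocked: %c\n", status[0]);
	if (fd >= 0)
		close(fd);
	return 0;
}

Note that dps0_insert_key() returns -EINVAL for anything but an exact 8-byte write, so a plain shell echo, which appends a newline, would be rejected.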
/**
* struct docg3 - DiskOnChip driver private data
* @dev: the device currently under control
* @base: mapped IO space
* @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
* @if_cfg: if true, reads are on 16bits, else reads are on 8bits
+ * @reliable: if 0, docg3 in normal mode, if 1 docg3 in fast mode, if 2 in
+ * reliable mode
+ * Fast mode implies more errors than normal mode.
+ * Reliable mode implies that pages 2*n and 2*n+1 are clones.
* @bbt: bad block table cache
+ * @oob_write_ofs: offset of the MTD where this OOB should belong (i.e. in next
+ * page_write)
+ * @oob_autoecc: if 1, use only bytes 0-7, 15, and fill the others with HW ECC
+ * if 0, use all the 16 bytes.
+ * @oob_write_buf: prepared OOB for next page_write
* @debugfs_root: debugfs root node
*/
struct docg3 {
void __iomem *base;
unsigned int device_id:4;
unsigned int if_cfg:1;
+ unsigned int reliable:2;
int max_block;
u8 *bbt;
+ loff_t oob_write_ofs;
+ int oob_autoecc;
+ u8 oob_write_buf[DOC_LAYOUT_OOB_SIZE];
struct dentry *debugfs_root;
};
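
Spelling out one consequence of the @reliable field documented above (my reading of doc_set_driver_info() earlier in the patch, stated here for clarity): in reliable mode pages 2*n and 2*n+1 carry the same data, so only every other page is usable and the geometry exposed through mtd_info is halved:

	mtd->size      = (max_block + 1) * DOC_LAYOUT_BLOCK_SIZE / 2
	mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES / 2

which is exactly what the two "if (docg3->reliable == 2)" branches in doc_set_driver_info() do; writesize and oobsize keep their per-page values.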
return;
}
docfound = 1;
- mtd = kmalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
-
+ mtd = kzalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!mtd) {
printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n");
iounmap(docptr);
}
this = (struct DiskOnChip *)(&mtd[1]);
-
- memset((char *)mtd,0, sizeof(struct mtd_info));
- memset((char *)this, 0, sizeof(struct DiskOnChip));
-
mtd->priv = this;
this->virtadr = docptr;
this->physadr = physadr;
static struct spi_driver m25p80_driver = {
.driver = {
.name = "m25p80",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.id_table = m25p_ids,
static struct spi_driver dataflash_driver = {
.driver = {
.name = "mtd_dataflash",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.of_match_table = dataflash_dt_ids,
},
struct flash_info *flash_info;
struct sst25l_flash *flash;
struct flash_platform_data *data;
- int ret, i;
+ int ret;
flash_info = sst25l_match_device(spi);
if (!flash_info)
static struct spi_driver sst25l_driver = {
.driver = {
.name = "sst25l",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = sst25l_probe,
(offset + sizeof(header)) < max_offset;
offset += part->mbd.mtd->erasesize ? : 0x2000) {
- err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
- (unsigned char *)&header);
+ err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret,
+ (unsigned char *)&header);
if (err)
return err;
for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
<< part->header.EraseUnitSize);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &retval,
- (unsigned char *)&header);
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval,
+ (unsigned char *)&header);
if (ret)
goto out_XferInfo;
part->EUNInfo[i].Deleted = 0;
offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset,
- part->BlocksPerUnit * sizeof(uint32_t), &retval,
- (unsigned char *)part->bam_cache);
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retval,
+ (unsigned char *)part->bam_cache);
if (ret)
goto out_bam_cache;
erase->len = 1 << part->header.EraseUnitSize;
erase->priv = (u_long)part;
- ret = part->mbd.mtd->erase(part->mbd.mtd, erase);
+ ret = mtd_erase(part->mbd.mtd, erase);
if (!ret)
xfer->EraseCount++;
header.LogicalEUN = cpu_to_le16(0xffff);
header.EraseCount = cpu_to_le32(xfer->EraseCount);
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset, sizeof(header),
- &retlen, (u_char *)&header);
+ ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen,
+ (u_char *)&header);
if (ret) {
return ret;
for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&ctl);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&ctl);
if (ret)
return ret;
offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset,
- part->BlocksPerUnit * sizeof(uint32_t),
- &retlen, (u_char *) (part->bam_cache));
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retlen,
+ (u_char *)(part->bam_cache));
/* mark the cache bad, in case we get an error later */
part->bam_index = 0xffff;
offset = xfer->Offset + 20; /* Bad! */
unit = cpu_to_le16(0x7fff);
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint16_t),
- &retlen, (u_char *) &unit);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen,
+ (u_char *)&unit);
if (ret) {
printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
break;
case BLOCK_DATA:
case BLOCK_REPLACEMENT:
- ret = part->mbd.mtd->read(part->mbd.mtd, src, SECTOR_SIZE,
- &retlen, (u_char *) buf);
+ ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (ret) {
printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
return ret;
}
- ret = part->mbd.mtd->write(part->mbd.mtd, dest, SECTOR_SIZE,
- &retlen, (u_char *) buf);
+ ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
return ret;
}
/* Write the BAM to the transfer unit */
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + le32_to_cpu(part->header.BAMOffset),
- part->BlocksPerUnit * sizeof(int32_t), &retlen,
- (u_char *)part->bam_cache);
+ ret = mtd_write(part->mbd.mtd,
+ xfer->Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(int32_t),
+ &retlen,
+ (u_char *)part->bam_cache);
if (ret) {
printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
return ret;
/* All clear? Then update the LogicalEUN again */
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
- &retlen, (u_char *)&srcunitswap);
+ ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
+ &retlen, (u_char *)&srcunitswap);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
if (queued) {
pr_debug("ftl_cs: waiting for transfer "
"unit to be prepared...\n");
- if (part->mbd.mtd->sync)
- part->mbd.mtd->sync(part->mbd.mtd);
+ mtd_sync(part->mbd.mtd);
} else {
static int ne = 0;
if (++ne < 5)
/* Invalidate cache */
part->bam_index = 0xffff;
- ret = part->mbd.mtd->read(part->mbd.mtd,
- part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
- part->BlocksPerUnit * sizeof(uint32_t),
- &retlen, (u_char *) (part->bam_cache));
+ ret = mtd_read(part->mbd.mtd,
+ part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(uint32_t),
+ &retlen,
+ (u_char *)(part->bam_cache));
if (ret) {
printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
else {
offset = (part->EUNInfo[log_addr / bsize].Offset
+ (log_addr % bsize));
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, SECTOR_SIZE,
- &retlen, (u_char *) buffer);
+ ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
+ (u_char *)buffer);
if (ret) {
printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
le32_to_cpu(part->header.BAMOffset));
#ifdef PSYCHO_DEBUG
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&old_addr);
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&old_addr);
if (ret) {
printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
return ret;
#endif
part->bam_cache[blk] = le_virt_addr;
}
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&le_virt_addr);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&le_virt_addr);
if (ret) {
printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n");
part->EUNInfo[part->bam_index].Deleted++;
offset = (part->EUNInfo[part->bam_index].Offset +
blk * SECTOR_SIZE);
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
- buffer);
+ ret = mtd_write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, buffer);
if (ret) {
printk(KERN_NOTICE "ftl_cs: block write failed!\n");
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.oobretlen;
return res;
}
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.oobretlen;
return res;
}
ops.datbuf = buf;
ops.len = len;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.retlen;
return res;
}
if (BlockMap[block] == BLOCK_NIL)
continue;
- ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) +
- (block * SECTORSIZE), SECTORSIZE, &retlen,
- movebuf);
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
- ret = mtd->read(mtd,
- (inftl->EraseSize * BlockMap[block]) +
- (block * SECTORSIZE), SECTORSIZE,
- &retlen, movebuf);
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
if (ret != -EIO)
pr_debug("INFTL: error went away on retry?\n");
}
} else {
size_t retlen;
loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
- int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer);
+ int ret = mtd_read(mtd, ptr, SECTORSIZE, &retlen, buffer);
/* Handle corrected bit flips gracefully */
if (ret < 0 && !mtd_is_bitflip(ret))
* Check for BNAND header first. Then whinge if it's found
* but later checks fail.
*/
- ret = mtd->read(mtd, block * inftl->EraseSize,
- SECTORSIZE, &retlen, buf);
+ ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
/* We ignore ret in case the ECC of the MediaHeader is invalid
(which is apparently acceptable) */
if (retlen != SECTORSIZE) {
memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
/* Read the spare media header at offset 4096 */
- mtd->read(mtd, block * inftl->EraseSize + 4096,
- SECTORSIZE, &retlen, buf);
+ mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE,
+ &retlen, buf);
if (retlen != SECTORSIZE) {
printk(KERN_WARNING "INFTL: Unable to read spare "
"Media Header\n");
*/
instr->addr = ip->Reserved0 * inftl->EraseSize;
instr->len = inftl->EraseSize;
- mtd->erase(mtd, instr);
+ mtd_erase(mtd, instr);
}
if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
printk(KERN_WARNING "INFTL: Media Header "
/* If any of the physical eraseblocks are bad, don't
use the unit. */
for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) {
- if (inftl->mbd.mtd->block_isbad(inftl->mbd.mtd, i * inftl->EraseSize + physblock))
+ if (mtd_block_isbad(inftl->mbd.mtd,
+ i * inftl->EraseSize + physblock))
inftl->PUtable[i] = BLOCK_RESERVED;
}
}
int i;
for (i = 0; i < len; i += SECTORSIZE) {
- if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
+ if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
return -1;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
return -1;
mark only the failed block in the bbt. */
for (physblock = 0; physblock < inftl->EraseSize;
physblock += instr->len, instr->addr += instr->len) {
- mtd->erase(inftl->mbd.mtd, instr);
+ mtd_erase(inftl->mbd.mtd, instr);
if (instr->state == MTD_ERASE_FAILED) {
printk(KERN_WARNING "INFTL: error while formatting block %d\n",
fail:
/* could not format, update the bad block table (caller is responsible
for setting the PUtable to BLOCK_RESERVED on failure) */
- inftl->mbd.mtd->block_markbad(inftl->mbd.mtd, instr->addr);
+ mtd_block_markbad(inftl->mbd.mtd, instr->addr);
return -1;
}
mtd->erase = lpddr_erase;
mtd->write = lpddr_write_buffers;
mtd->writev = lpddr_writev;
- mtd->read_oob = NULL;
- mtd->write_oob = NULL;
- mtd->sync = NULL;
mtd->lock = lpddr_lock;
mtd->unlock = lpddr_unlock;
- mtd->suspend = NULL;
- mtd->resume = NULL;
if (map_is_linear(map)) {
mtd->point = lpddr_point;
mtd->unpoint = lpddr_unpoint;
}
- mtd->block_isbad = NULL;
- mtd->block_markbad = NULL;
mtd->size = 1 << lpddr->qinfo->DevSizeShift;
mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
help
Support for flash chips on NETtel/SecureEdge/SnapGear boards.
-config MTD_BCM963XX
- tristate "Map driver for Broadcom BCM963xx boards"
- depends on BCM63XX
- select MTD_MAP_BANK_WIDTH_2
- select MTD_CFI_I1
- help
- Support for parsing CFE image tag and creating MTD partitions on
- Broadcom BCM63xx boards.
-
config MTD_LANTIQ
tristate "Lantiq SoC NOR support"
depends on LANTIQ
obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
-obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o
obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
+++ /dev/null
-/*
- * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
- * Mike Albon <malbon@openwrt.org>
- * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/vmalloc.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <asm/mach-bcm63xx/bcm963xx_tag.h>
-
-#define BCM63XX_BUSWIDTH 2 /* Buswidth */
-#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
-
-#define PFX KBUILD_MODNAME ": "
-
-static struct mtd_partition *parsed_parts;
-
-static struct mtd_info *bcm963xx_mtd_info;
-
-static struct map_info bcm963xx_map = {
- .name = "bcm963xx",
- .bankwidth = BCM63XX_BUSWIDTH,
-};
-
-static int parse_cfe_partitions(struct mtd_info *master,
- struct mtd_partition **pparts)
-{
- /* CFE, NVRAM and global Linux are always present */
- int nrparts = 3, curpart = 0;
- struct bcm_tag *buf;
- struct mtd_partition *parts;
- int ret;
- size_t retlen;
- unsigned int rootfsaddr, kerneladdr, spareaddr;
- unsigned int rootfslen, kernellen, sparelen, totallen;
- int namelen = 0;
- int i;
- char *boardid;
- char *tagversion;
-
- /* Allocate memory for buffer */
- buf = vmalloc(sizeof(struct bcm_tag));
- if (!buf)
- return -ENOMEM;
-
- /* Get the tag */
- ret = master->read(master, master->erasesize, sizeof(struct bcm_tag),
- &retlen, (void *)buf);
- if (retlen != sizeof(struct bcm_tag)) {
- vfree(buf);
- return -EIO;
- }
-
- sscanf(buf->kernel_address, "%u", &kerneladdr);
- sscanf(buf->kernel_length, "%u", &kernellen);
- sscanf(buf->total_length, "%u", &totallen);
- tagversion = &(buf->tag_version[0]);
- boardid = &(buf->board_id[0]);
-
- printk(KERN_INFO PFX "CFE boot tag found with version %s "
- "and board type %s\n", tagversion, boardid);
-
- kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
- rootfsaddr = kerneladdr + kernellen;
- spareaddr = roundup(totallen, master->erasesize) + master->erasesize;
- sparelen = master->size - spareaddr - master->erasesize;
- rootfslen = spareaddr - rootfsaddr;
-
- /* Determine number of partitions */
- namelen = 8;
- if (rootfslen > 0) {
- nrparts++;
- namelen += 6;
- };
- if (kernellen > 0) {
- nrparts++;
- namelen += 6;
- };
-
- /* Ask kernel for more memory */
- parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
- if (!parts) {
- vfree(buf);
- return -ENOMEM;
- };
-
- /* Start building partition list */
- parts[curpart].name = "CFE";
- parts[curpart].offset = 0;
- parts[curpart].size = master->erasesize;
- curpart++;
-
- if (kernellen > 0) {
- parts[curpart].name = "kernel";
- parts[curpart].offset = kerneladdr;
- parts[curpart].size = kernellen;
- curpart++;
- };
-
- if (rootfslen > 0) {
- parts[curpart].name = "rootfs";
- parts[curpart].offset = rootfsaddr;
- parts[curpart].size = rootfslen;
- if (sparelen > 0)
- parts[curpart].size += sparelen;
- curpart++;
- };
-
- parts[curpart].name = "nvram";
- parts[curpart].offset = master->size - master->erasesize;
- parts[curpart].size = master->erasesize;
-
- /* Global partition "linux" to make easy firmware upgrade */
- curpart++;
- parts[curpart].name = "linux";
- parts[curpart].offset = parts[0].size;
- parts[curpart].size = master->size - parts[0].size - parts[3].size;
-
- for (i = 0; i < nrparts; i++)
- printk(KERN_INFO PFX "Partition %d is %s offset %lx and "
- "length %lx\n", i, parts[i].name,
- (long unsigned int)(parts[i].offset),
- (long unsigned int)(parts[i].size));
-
- printk(KERN_INFO PFX "Spare partition is %x offset and length %x\n",
- spareaddr, sparelen);
- *pparts = parts;
- vfree(buf);
-
- return nrparts;
-};
-
-static int bcm963xx_detect_cfe(struct mtd_info *master)
-{
- int idoffset = 0x4e0;
- static char idstring[8] = "CFE1CFE1";
- char buf[9];
- int ret;
- size_t retlen;
-
- ret = master->read(master, idoffset, 8, &retlen, (void *)buf);
- buf[retlen] = 0;
- printk(KERN_INFO PFX "Read Signature value of %s\n", buf);
-
- return strncmp(idstring, buf, 8);
-}
-
-static int bcm963xx_probe(struct platform_device *pdev)
-{
- int err = 0;
- int parsed_nr_parts = 0;
- char *part_type;
- struct resource *r;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no resource supplied\n");
- return -ENODEV;
- }
-
- bcm963xx_map.phys = r->start;
- bcm963xx_map.size = resource_size(r);
- bcm963xx_map.virt = ioremap(r->start, resource_size(r));
- if (!bcm963xx_map.virt) {
- dev_err(&pdev->dev, "failed to ioremap\n");
- return -EIO;
- }
-
- dev_info(&pdev->dev, "0x%08lx at 0x%08x\n",
- bcm963xx_map.size, bcm963xx_map.phys);
-
- simple_map_init(&bcm963xx_map);
-
- bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
- if (!bcm963xx_mtd_info) {
- dev_err(&pdev->dev, "failed to probe using CFI\n");
- bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map);
- if (bcm963xx_mtd_info)
- goto probe_ok;
- dev_err(&pdev->dev, "failed to probe using JEDEC\n");
- err = -EIO;
- goto err_probe;
- }
-
-probe_ok:
- bcm963xx_mtd_info->owner = THIS_MODULE;
-
- /* This is mutually exclusive */
- if (bcm963xx_detect_cfe(bcm963xx_mtd_info) == 0) {
- dev_info(&pdev->dev, "CFE bootloader detected\n");
- if (parsed_nr_parts == 0) {
- int ret = parse_cfe_partitions(bcm963xx_mtd_info,
- &parsed_parts);
- if (ret > 0) {
- part_type = "CFE";
- parsed_nr_parts = ret;
- }
- }
- } else {
- dev_info(&pdev->dev, "unsupported bootloader\n");
- err = -ENODEV;
- goto err_probe;
- }
-
- return mtd_device_register(bcm963xx_mtd_info, parsed_parts,
- parsed_nr_parts);
-
-err_probe:
- iounmap(bcm963xx_map.virt);
- return err;
-}
-
-static int bcm963xx_remove(struct platform_device *pdev)
-{
- if (bcm963xx_mtd_info) {
- mtd_device_unregister(bcm963xx_mtd_info);
- map_destroy(bcm963xx_mtd_info);
- }
-
- if (bcm963xx_map.virt) {
- iounmap(bcm963xx_map.virt);
- bcm963xx_map.virt = 0;
- }
-
- return 0;
-}
-
-static struct platform_driver bcm63xx_mtd_dev = {
- .probe = bcm963xx_probe,
- .remove = bcm963xx_remove,
- .driver = {
- .name = "bcm963xx-flash",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init bcm963xx_mtd_init(void)
-{
- return platform_driver_register(&bcm63xx_mtd_dev);
-}
-
-static void __exit bcm963xx_mtd_exit(void)
-{
- platform_driver_unregister(&bcm63xx_mtd_dev);
-}
-
-module_init(bcm963xx_mtd_init);
-module_exit(bcm963xx_mtd_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Broadcom BCM63xx MTD driver for CFE and RedBoot");
-MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
},
};
-static int __init bfin_flash_init(void)
-{
- return platform_driver_register(&bfin_flash_driver);
-}
-module_init(bfin_flash_init);
-
-static void __exit bfin_flash_exit(void)
-{
- platform_driver_unregister(&bfin_flash_driver);
-}
-module_exit(bfin_flash_exit);
+module_platform_driver(bfin_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
},
};
-static int __init gpio_flash_init(void)
-{
- return platform_driver_register(&gpio_flash_driver);
-}
-module_init(gpio_flash_init);
-
-static void __exit gpio_flash_exit(void)
-{
- platform_driver_unregister(&gpio_flash_driver);
-}
-module_exit(gpio_flash_exit);
+module_platform_driver(gpio_flash_driver);
MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios");
},
};
-static int __init ixp2000_flash_init(void)
-{
- return platform_driver_register(&ixp2000_flash_driver);
-}
-
-static void __exit ixp2000_flash_exit(void)
-{
- platform_driver_unregister(&ixp2000_flash_driver);
-}
+module_platform_driver(ixp2000_flash_driver);
-module_init(ixp2000_flash_init);
-module_exit(ixp2000_flash_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
MODULE_ALIAS("platform:IXP2000-Flash");
},
};
-static int __init ixp4xx_flash_init(void)
-{
- return platform_driver_register(&ixp4xx_flash_driver);
-}
-
-static void __exit ixp4xx_flash_exit(void)
-{
- platform_driver_unregister(&ixp4xx_flash_driver);
-}
-
-
-module_init(ixp4xx_flash_init);
-module_exit(ixp4xx_flash_exit);
+module_platform_driver(ixp4xx_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
if (!ltq_mtd->mtd) {
dev_err(&pdev->dev, "probing failed\n");
err = -ENXIO;
- goto err_unmap;
+ goto err_free;
}
ltq_mtd->mtd->owner = THIS_MODULE;
err_destroy:
map_destroy(ltq_mtd->mtd);
-err_unmap:
- iounmap(ltq_mtd->map->virt);
err_free:
kfree(ltq_mtd->map);
err_out:
mtd_device_unregister(ltq_mtd->mtd);
map_destroy(ltq_mtd->mtd);
}
- if (ltq_mtd->map->virt)
- iounmap(ltq_mtd->map->virt);
kfree(ltq_mtd->map);
kfree(ltq_mtd);
}
},
};
-static int __init latch_addr_flash_init(void)
-{
- return platform_driver_register(&latch_addr_flash_driver);
-}
-module_init(latch_addr_flash_init);
-
-static void __exit latch_addr_flash_exit(void)
-{
- platform_driver_unregister(&latch_addr_flash_driver);
-}
-module_exit(latch_addr_flash_exit);
+module_platform_driver(latch_addr_flash_driver);
MODULE_AUTHOR("David Griego <dgriego@mvista.com>");
MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper "
struct physmap_flash_data *physmap_data;
struct physmap_flash_info *info;
const char **probe_type;
+ const char **part_types;
int err = 0;
int i;
int devices_found = 0;
if (err)
goto err_out;
- mtd_device_parse_register(info->cmtd, part_probe_types, 0,
+ part_types = physmap_data->part_probe_types ? : part_probe_types;
+
+ mtd_device_parse_register(info->cmtd, part_types, 0,
physmap_data->parts, physmap_data->nr_parts);
return 0;
int i;
for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->suspend && info->mtd[i]->resume)
- if (info->mtd[i]->suspend(info->mtd[i]) == 0)
- info->mtd[i]->resume(info->mtd[i]);
+ if (mtd_suspend(info->mtd[i]) == 0)
+ mtd_resume(info->mtd[i]);
}
#else
#define physmap_flash_shutdown NULL
.remove = of_flash_remove,
};
-static int __init of_flash_init(void)
-{
- return platform_driver_register(&of_flash_driver);
-}
-
-static void __exit of_flash_exit(void)
-{
- platform_driver_unregister(&of_flash_driver);
-}
-
-module_init(of_flash_init);
-module_exit(of_flash_exit);
+module_platform_driver(of_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>");
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
- if (info && info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (info && mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define pxa2xx_flash_shutdown NULL
.shutdown = pxa2xx_flash_shutdown,
};
-static int __init init_pxa2xx_flash(void)
-{
- return platform_driver_register(&pxa2xx_flash_driver);
-}
-
-static void __exit cleanup_pxa2xx_flash(void)
-{
- platform_driver_unregister(&pxa2xx_flash_driver);
-}
-
-module_init(init_pxa2xx_flash);
-module_exit(cleanup_pxa2xx_flash);
+module_platform_driver(pxa2xx_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
{
struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
- if (info->mtd->suspend && info->mtd->resume)
- if (info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define rbtx4939_flash_shutdown NULL
},
};
-static int __init rbtx4939_flash_init(void)
-{
- return platform_driver_register(&rbtx4939_flash_driver);
-}
-
-static void __exit rbtx4939_flash_exit(void)
-{
- platform_driver_unregister(&rbtx4939_flash_driver);
-}
-
-module_init(rbtx4939_flash_init);
-module_exit(rbtx4939_flash_exit);
+module_platform_driver(rbtx4939_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RBTX4939 MTD map driver");
static void sa1100_mtd_shutdown(struct platform_device *dev)
{
struct sa_info *info = platform_get_drvdata(dev);
- if (info && info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (info && mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define sa1100_mtd_shutdown NULL
},
};
-static int __init sa1100_mtd_init(void)
-{
- return platform_driver_register(&sa1100_mtd_driver);
-}
-
-static void __exit sa1100_mtd_exit(void)
-{
- platform_driver_unregister(&sa1100_mtd_driver);
-}
-
-module_init(sa1100_mtd_init);
-module_exit(sa1100_mtd_exit);
+module_platform_driver(sa1100_mtd_driver);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("SA1100 CFI map driver");
return;
/* disable flash writes */
- if (scb2_mtd->lock)
- scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
+ mtd_lock(scb2_mtd, 0, scb2_mtd->size);
mtd_device_unregister(scb2_mtd);
map_destroy(scb2_mtd);
.remove = __devexit_p(uflash_remove),
};
-static int __init uflash_init(void)
-{
- return platform_driver_register(&uflash_driver);
-}
-
-static void __exit uflash_exit(void)
-{
- platform_driver_unregister(&uflash_driver);
-}
-
-module_init(uflash_init);
-module_exit(uflash_exit);
+module_platform_driver(uflash_driver);
mutex_lock(&dev->lock);
- if (dev->open++)
+ if (dev->open)
goto unlock;
kref_get(&dev->ref);
goto error_release;
unlock:
+ dev->open++;
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&wait_q, &wait);
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
* Next, write the data to flash.
*/
- ret = mtd->write(mtd, pos, len, &retlen, buf);
+ ret = mtd_write(mtd, pos, len, &retlen, buf);
if (ret)
return ret;
if (retlen != len)
mtd->name, pos, len);
if (!sect_size)
- return mtd->write(mtd, pos, len, &retlen, buf);
+ return mtd_write(mtd, pos, len, &retlen, buf);
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
mtdblk->cache_offset != sect_start) {
/* fill the cache with the current sector */
mtdblk->cache_state = STATE_EMPTY;
- ret = mtd->read(mtd, sect_start, sect_size,
- &retlen, mtdblk->cache_data);
+ ret = mtd_read(mtd, sect_start, sect_size,
+ &retlen, mtdblk->cache_data);
if (ret)
return ret;
if (retlen != sect_size)
mtd->name, pos, len);
if (!sect_size)
- return mtd->read(mtd, pos, len, &retlen, buf);
+ return mtd_read(mtd, pos, len, &retlen, buf);
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
mtdblk->cache_offset == sect_start) {
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
- ret = mtd->read(mtd, pos, size, &retlen, buf);
+ ret = mtd_read(mtd, pos, size, &retlen, buf);
if (ret)
return ret;
if (retlen != size)
if (!--mtdblk->count) {
/* It was the last usage. Free the cache */
- if (mbd->mtd->sync)
- mbd->mtd->sync(mbd->mtd);
+ mtd_sync(mbd->mtd);
vfree(mtdblk->cache_data);
}
mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
-
- if (dev->mtd->sync)
- dev->mtd->sync(dev->mtd);
+ mtd_sync(dev->mtd);
return 0;
}
{
size_t retlen;
- if (dev->mtd->read(dev->mtd, (block * 512), 512, &retlen, buf))
+ if (mtd_read(dev->mtd, (block * 512), 512, &retlen, buf))
return 1;
return 0;
}
{
size_t retlen;
- if (dev->mtd->write(dev->mtd, (block * 512), 512, &retlen, buf))
+ if (mtd_write(dev->mtd, (block * 512), 512, &retlen, buf))
return 1;
return 0;
}
enum mtd_file_modes mode;
};
-static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
+static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
-static int mtd_open(struct inode *inode, struct file *file)
+static int mtdchar_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
int devnum = minor >> 1;
out:
mutex_unlock(&mtd_mutex);
return ret;
-} /* mtd_open */
+} /* mtdchar_open */
/*====================================================================*/
-static int mtd_close(struct inode *inode, struct file *file)
+static int mtdchar_close(struct inode *inode, struct file *file)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
pr_debug("MTD_close\n");
/* Only sync if opened RW */
- if ((file->f_mode & FMODE_WRITE) && mtd->sync)
- mtd->sync(mtd);
+ if ((file->f_mode & FMODE_WRITE))
+ mtd_sync(mtd);
iput(mfi->ino);
kfree(mfi);
return 0;
-} /* mtd_close */
+} /* mtdchar_close */
/* Back in June 2001, dwmw2 wrote:
*
* alignment requirements are not met in the NAND subdriver.
*/
-static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
+static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
- size_t retlen=0;
+ size_t retlen;
size_t total_retlen=0;
int ret=0;
int len;
switch (mfi->mode) {
case MTD_FILE_MODE_OTP_FACTORY:
- ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
case MTD_FILE_MODE_OTP_USER:
- ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_read_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
case MTD_FILE_MODE_RAW:
{
ops.oobbuf = NULL;
ops.len = len;
- ret = mtd->read_oob(mtd, *ppos, &ops);
+ ret = mtd_read_oob(mtd, *ppos, &ops);
retlen = ops.retlen;
break;
}
default:
- ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
}
/* Nand returns -EBADMSG on ECC errors, but it returns
* the data. For our userspace tools it is important
kfree(kbuf);
return total_retlen;
-} /* mtd_read */
+} /* mtdchar_read */
-static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
+static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
ret = -EROFS;
break;
case MTD_FILE_MODE_OTP_USER:
- if (!mtd->write_user_prot_reg) {
- ret = -EOPNOTSUPP;
- break;
- }
- ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_write_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
case MTD_FILE_MODE_RAW:
ops.ooboffs = 0;
ops.len = len;
- ret = mtd->write_oob(mtd, *ppos, &ops);
+ ret = mtd_write_oob(mtd, *ppos, &ops);
retlen = ops.retlen;
break;
}
default:
- ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
}
if (!ret) {
*ppos += retlen;
kfree(kbuf);
return total_retlen;
-} /* mtd_write */
+} /* mtdchar_write */
/*======================================================================
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
struct mtd_info *mtd = mfi->mtd;
+ size_t retlen;
int ret = 0;
+ /*
+ * Make a fake call to mtd_read_fact_prot_reg() to check if OTP
+ * operations are supported.
+ */
+ if (mtd_read_fact_prot_reg(mtd, -1, -1, &retlen, NULL) == -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
switch (mode) {
case MTD_OTP_FACTORY:
- if (!mtd->read_fact_prot_reg)
- ret = -EOPNOTSUPP;
- else
- mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
+ mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
break;
case MTD_OTP_USER:
- if (!mtd->read_fact_prot_reg)
- ret = -EOPNOTSUPP;
- else
- mfi->mode = MTD_FILE_MODE_OTP_USER;
+ mfi->mode = MTD_FILE_MODE_OTP_USER;
break;
default:
ret = -EINVAL;
# define otp_select_filemode(f,m) -EOPNOTSUPP
#endif
-static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
+static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
return PTR_ERR(ops.oobbuf);
start &= ~((uint64_t)mtd->writesize - 1);
- ret = mtd->write_oob(mtd, start, &ops);
+ ret = mtd_write_oob(mtd, start, &ops);
if (ops.oobretlen > 0xFFFFFFFFU)
ret = -EOVERFLOW;
return ret;
}
-static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
+static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
if (length > 4096)
return -EINVAL;
- if (!mtd->read_oob)
- ret = -EOPNOTSUPP;
- else
- ret = access_ok(VERIFY_WRITE, ptr,
- length) ? 0 : -EFAULT;
- if (ret)
- return ret;
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
ops.ooblen = length;
ops.ooboffs = start & (mtd->writesize - 1);
return -ENOMEM;
start &= ~((uint64_t)mtd->writesize - 1);
- ret = mtd->read_oob(mtd, start, &ops);
+ ret = mtd_read_oob(mtd, start, &ops);
if (put_user(ops.oobretlen, retp))
ret = -EFAULT;
return 0;
}
-static int mtd_blkpg_ioctl(struct mtd_info *mtd,
+static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
struct blkpg_ioctl_arg __user *arg)
{
struct blkpg_ioctl_arg a;
}
}
-static int mtd_write_ioctl(struct mtd_info *mtd,
+static int mtdchar_write_ioctl(struct mtd_info *mtd,
struct mtd_write_req __user *argp)
{
struct mtd_write_req req;
ops.oobbuf = NULL;
}
- ret = mtd->write_oob(mtd, (loff_t)req.start, &ops);
+ ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
kfree(ops.datbuf);
kfree(ops.oobbuf);
return ret;
}
-static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
+static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
wq_head is no longer there when the
callback routine tries to wake us up.
*/
- ret = mtd->erase(mtd, erase);
+ ret = mtd_erase(mtd, erase);
if (!ret) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&waitq, &wait);
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
buf.ptr, &buf_user->length);
break;
}
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
buf.ptr, &buf_user->start);
break;
}
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
(void __user *)(uintptr_t)buf.usr_ptr,
&buf_user->length);
break;
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
(void __user *)(uintptr_t)buf.usr_ptr,
&buf_user->length);
break;
case MEMWRITE:
{
- ret = mtd_write_ioctl(mtd,
+ ret = mtdchar_write_ioctl(mtd,
(struct mtd_write_req __user *)arg);
break;
}
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->lock)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->lock(mtd, einfo.start, einfo.length);
+ ret = mtd_lock(mtd, einfo.start, einfo.length);
break;
}
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->unlock)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->unlock(mtd, einfo.start, einfo.length);
+ ret = mtd_unlock(mtd, einfo.start, einfo.length);
break;
}
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->is_locked)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->is_locked(mtd, einfo.start, einfo.length);
+ ret = mtd_is_locked(mtd, einfo.start, einfo.length);
break;
}
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
- if (!mtd->block_isbad)
- ret = -EOPNOTSUPP;
- else
- return mtd->block_isbad(mtd, offs);
+ return mtd_block_isbad(mtd, offs);
break;
}
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
- if (!mtd->block_markbad)
- ret = -EOPNOTSUPP;
- else
- return mtd->block_markbad(mtd, offs);
+ return mtd_block_markbad(mtd, offs);
break;
}
struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = -EOPNOTSUPP;
switch (mfi->mode) {
case MTD_FILE_MODE_OTP_FACTORY:
- if (mtd->get_fact_prot_info)
- ret = mtd->get_fact_prot_info(mtd, buf, 4096);
+ ret = mtd_get_fact_prot_info(mtd, buf, 4096);
break;
case MTD_FILE_MODE_OTP_USER:
- if (mtd->get_user_prot_info)
- ret = mtd->get_user_prot_info(mtd, buf, 4096);
+ ret = mtd_get_user_prot_info(mtd, buf, 4096);
break;
default:
+ ret = -EINVAL;
break;
}
if (ret >= 0) {
return -EINVAL;
if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
return -EFAULT;
- if (!mtd->lock_user_prot_reg)
- return -EOPNOTSUPP;
- ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
+ ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
break;
}
#endif
break;
case MTD_FILE_MODE_RAW:
- if (!mtd->read_oob || !mtd->write_oob)
+ if (!mtd_has_oob(mtd))
return -EOPNOTSUPP;
mfi->mode = arg;
case BLKPG:
{
- ret = mtd_blkpg_ioctl(mtd,
+ ret = mtdchar_blkpg_ioctl(mtd,
(struct blkpg_ioctl_arg __user *)arg);
break;
}
return ret;
} /* memory_ioctl */
-static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
+static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
int ret;
mutex_lock(&mtd_mutex);
- ret = mtd_ioctl(file, cmd, arg);
+ ret = mtdchar_ioctl(file, cmd, arg);
mutex_unlock(&mtd_mutex);
return ret;
#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
-static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
+static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct mtd_file_info *mfi = file->private_data;
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start,
+ ret = mtdchar_writeoob(file, mtd, buf.start,
buf.length, compat_ptr(buf.ptr),
&buf_user->length);
break;
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(file, mtd, buf.start,
+ ret = mtdchar_readoob(file, mtd, buf.start,
buf.length, compat_ptr(buf.ptr),
&buf_user->start);
break;
}
default:
- ret = mtd_ioctl(file, cmd, (unsigned long)argp);
+ ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
}
mutex_unlock(&mtd_mutex);
* mappings)
*/
#ifndef CONFIG_MMU
-static unsigned long mtd_get_unmapped_area(struct file *file,
+static unsigned long mtdchar_get_unmapped_area(struct file *file,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
+ unsigned long offset;
+ int ret;
- if (mtd->get_unmapped_area) {
- unsigned long offset;
-
- if (addr != 0)
- return (unsigned long) -EINVAL;
-
- if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
- return (unsigned long) -EINVAL;
+ if (addr != 0)
+ return (unsigned long) -EINVAL;
- offset = pgoff << PAGE_SHIFT;
- if (offset > mtd->size - len)
- return (unsigned long) -EINVAL;
+ if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
+ return (unsigned long) -EINVAL;
- return mtd->get_unmapped_area(mtd, len, offset, flags);
- }
+ offset = pgoff << PAGE_SHIFT;
+ if (offset > mtd->size - len)
+ return (unsigned long) -EINVAL;
- /* can't map directly */
- return (unsigned long) -ENOSYS;
+ ret = mtd_get_unmapped_area(mtd, len, offset, flags);
+ return ret == -EOPNOTSUPP ? -ENOSYS : ret;
}
#endif
/*
* set up a mapping for shared memory segments
*/
-static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
+static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
struct mtd_file_info *mfi = file->private_data;
static const struct file_operations mtd_fops = {
.owner = THIS_MODULE,
- .llseek = mtd_lseek,
- .read = mtd_read,
- .write = mtd_write,
- .unlocked_ioctl = mtd_unlocked_ioctl,
+ .llseek = mtdchar_lseek,
+ .read = mtdchar_read,
+ .write = mtdchar_write,
+ .unlocked_ioctl = mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = mtd_compat_ioctl,
+ .compat_ioctl = mtdchar_compat_ioctl,
#endif
- .open = mtd_open,
- .release = mtd_close,
- .mmap = mtd_mmap,
+ .open = mtdchar_open,
+ .release = mtdchar_close,
+ .mmap = mtdchar_mmap,
#ifndef CONFIG_MMU
- .get_unmapped_area = mtd_get_unmapped_area,
+ .get_unmapped_area = mtdchar_get_unmapped_area,
#endif
};
/* Entire transaction goes into this subdev */
size = len;
- err = subdev->read(subdev, from, size, &retsize, buf);
+ err = mtd_read(subdev, from, size, &retsize, buf);
/* Save information about bitflips! */
if (unlikely(err)) {
if (!(subdev->flags & MTD_WRITEABLE))
err = -EROFS;
else
- err = subdev->write(subdev, to, size, &retsize, buf);
+ err = mtd_write(subdev, to, size, &retsize, buf);
if (err)
break;
if (!(subdev->flags & MTD_WRITEABLE))
err = -EROFS;
else
- err = subdev->writev(subdev, &vecs_copy[entry_low],
- entry_high - entry_low + 1, to, &retsize);
+ err = mtd_writev(subdev, &vecs_copy[entry_low],
+ entry_high - entry_low + 1, to,
+ &retsize);
vecs_copy[entry_high].iov_len = old_iov_len - size;
vecs_copy[entry_high].iov_base += size;
if (from + devops.len > subdev->size)
devops.len = subdev->size - from;
- err = subdev->read_oob(subdev, from, &devops);
+ err = mtd_read_oob(subdev, from, &devops);
ops->retlen += devops.retlen;
ops->oobretlen += devops.oobretlen;
if (to + devops.len > subdev->size)
devops.len = subdev->size - to;
- err = subdev->write_oob(subdev, to, &devops);
+ err = mtd_write_oob(subdev, to, &devops);
ops->retlen += devops.oobretlen;
if (err)
return err;
* FIXME: Allow INTERRUPTIBLE. Which means
* not having the wait_queue head on the stack.
*/
- err = mtd->erase(mtd, erase);
+ err = mtd_erase(mtd, erase);
if (!err) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&waitq, &wait);
else
size = len;
- if (subdev->lock) {
- err = subdev->lock(subdev, ofs, size);
- if (err)
- break;
- } else
- err = -EOPNOTSUPP;
+ err = mtd_lock(subdev, ofs, size);
+ if (err)
+ break;
len -= size;
if (len == 0)
else
size = len;
- if (subdev->unlock) {
- err = subdev->unlock(subdev, ofs, size);
- if (err)
- break;
- } else
- err = -EOPNOTSUPP;
+ err = mtd_unlock(subdev, ofs, size);
+ if (err)
+ break;
len -= size;
if (len == 0)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- subdev->sync(subdev);
+ mtd_sync(subdev);
}
}
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- if ((rc = subdev->suspend(subdev)) < 0)
+ if ((rc = mtd_suspend(subdev)) < 0)
return rc;
}
return rc;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- subdev->resume(subdev);
+ mtd_resume(subdev);
}
}
struct mtd_concat *concat = CONCAT(mtd);
int i, res = 0;
- if (!concat->subdev[0]->block_isbad)
+ if (!mtd_can_have_bb(concat->subdev[0]))
return res;
if (ofs > mtd->size)
continue;
}
- res = subdev->block_isbad(subdev, ofs);
+ res = mtd_block_isbad(subdev, ofs);
break;
}
struct mtd_concat *concat = CONCAT(mtd);
int i, err = -EINVAL;
- if (!concat->subdev[0]->block_markbad)
+ if (!mtd_can_have_bb(concat->subdev[0]))
return 0;
if (ofs > mtd->size)
continue;
}
- err = subdev->block_markbad(subdev, ofs);
+ err = mtd_block_markbad(subdev, ofs);
if (!err)
mtd->ecc_stats.badblocks++;
break;
if (offset + len > subdev->size)
return (unsigned long) -EINVAL;
- if (subdev->get_unmapped_area)
- return subdev->get_unmapped_area(subdev, len, offset,
- flags);
-
- break;
+ return mtd_get_unmapped_area(subdev, len, offset, flags);
}
return (unsigned long) -ENOSYS;
*/
static void mtd_release(struct device *dev)
{
- dev_t index = MTD_DEVT(dev_to_mtd(dev)->index);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ dev_t index = MTD_DEVT(mtd->index);
/* remove /dev/mtdXro node if needed */
if (index)
static int mtd_cls_suspend(struct device *dev, pm_message_t state)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
- if (mtd && mtd->suspend)
- return mtd->suspend(mtd);
- else
- return 0;
+	return mtd ? mtd_suspend(mtd) : 0;
}
static int mtd_cls_resume(struct device *dev)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
-
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
if (mtd && mtd->resume)
- mtd->resume(mtd);
+ mtd_resume(mtd);
return 0;
}
static ssize_t mtd_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
char *type;
switch (mtd->type) {
static ssize_t mtd_flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
static ssize_t mtd_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)mtd->size);
static ssize_t mtd_erasesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
static ssize_t mtd_writesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
static ssize_t mtd_subpagesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
static ssize_t mtd_oobsize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
static ssize_t mtd_numeraseregions_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
static ssize_t mtd_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
/* Some chips always power up locked. Unlock them now */
- if ((mtd->flags & MTD_WRITEABLE)
- && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
- if (mtd->unlock(mtd, 0, mtd->size))
+ if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
+ error = mtd_unlock(mtd, 0, mtd->size);
+ if (error && error != -EOPNOTSUPP)
printk(KERN_WARNING
"%s: unlock failed, writes may not work\n",
mtd->name);
* or removal of MTD devices. Causes the 'add' callback to be immediately
* invoked for each MTD device currently present in the system.
*/
-
void register_mtd_user (struct mtd_notifier *new)
{
struct mtd_info *mtd;
mutex_unlock(&mtd_table_mutex);
}
+EXPORT_SYMBOL_GPL(register_mtd_user);
/**
* unregister_mtd_user - unregister a 'user' of MTD devices.
* 'remove' callback to be immediately invoked for each MTD device
* currently present in the system.
*/
-
int unregister_mtd_user (struct mtd_notifier *old)
{
struct mtd_info *mtd;
mutex_unlock(&mtd_table_mutex);
return 0;
}
-
+EXPORT_SYMBOL_GPL(unregister_mtd_user);
/**
* get_mtd_device - obtain a validated handle for an MTD device
* both, return the num'th driver only if its address matches. Return
* error code if not.
*/
-
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
struct mtd_info *ret = NULL, *other;
mutex_unlock(&mtd_table_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(get_mtd_device);
int __get_mtd_device(struct mtd_info *mtd)
mtd->usecount++;
return 0;
}
+EXPORT_SYMBOL_GPL(__get_mtd_device);
/**
* get_mtd_device_nm - obtain a validated handle for an MTD device by
* This function returns MTD device description structure in case of
* success and an error code in case of failure.
*/
-
struct mtd_info *get_mtd_device_nm(const char *name)
{
int err = -ENODEV;
mutex_unlock(&mtd_table_mutex);
return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(get_mtd_device_nm);
void put_mtd_device(struct mtd_info *mtd)
{
mutex_unlock(&mtd_table_mutex);
}
+EXPORT_SYMBOL_GPL(put_mtd_device);
void __put_mtd_device(struct mtd_info *mtd)
{
module_put(mtd->owner);
}
+EXPORT_SYMBOL_GPL(__put_mtd_device);
-/* default_mtd_writev - default mtd writev method for MTD devices that
- * don't implement their own
+/*
+ * default_mtd_writev - the default writev method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
*/
-
-int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen)
+static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
{
unsigned long i;
size_t totlen = 0, thislen;
int ret = 0;
- if(!mtd->write) {
- ret = -EROFS;
- } else {
- for (i=0; i<count; i++) {
- if (!vecs[i].iov_len)
- continue;
- ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
- totlen += thislen;
- if (ret || thislen != vecs[i].iov_len)
- break;
- to += vecs[i].iov_len;
- }
+ for (i = 0; i < count; i++) {
+ if (!vecs[i].iov_len)
+ continue;
+ ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
+ vecs[i].iov_base);
+ totlen += thislen;
+ if (ret || thislen != vecs[i].iov_len)
+ break;
+ to += vecs[i].iov_len;
}
- if (retlen)
- *retlen = totlen;
+ *retlen = totlen;
return ret;
}
+/*
+ * mtd_writev - the vector-based MTD write method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ *retlen = 0;
+ if (!mtd->writev)
+ return default_mtd_writev(mtd, vecs, count, to, retlen);
+ return mtd->writev(mtd, vecs, count, to, retlen);
+}
+EXPORT_SYMBOL_GPL(mtd_writev);
+
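
For context, a hedged sketch of how a caller might use the new wrapper instead of dereferencing mtd->writev itself. The function name, buffers and offsets below are illustrative only and are not part of this patch (requires <linux/mtd/mtd.h> and <linux/uio.h>):

/* Illustrative only: write a header and a data buffer in one vectored call. */
static int example_writev_caller(struct mtd_info *mtd, loff_t to,
				 void *hdr, size_t hdr_len,
				 void *data, size_t data_len)
{
	struct kvec vecs[2];
	size_t retlen;
	int ret;

	vecs[0].iov_base = hdr;
	vecs[0].iov_len  = hdr_len;
	vecs[1].iov_base = data;
	vecs[1].iov_len  = data_len;

	/*
	 * mtd_writev() falls back to default_mtd_writev() when the driver
	 * provides no ->writev method, so the caller no longer has to test
	 * the function pointer itself.
	 */
	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
	if (ret)
		return ret;
	return retlen == hdr_len + data_len ? 0 : -EIO;
}
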
/**
* mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
- * @size: A pointer to the ideal or maximum size of the allocation. Points
+ * @mtd: mtd device description object pointer
+ * @size: a pointer to the ideal or maximum size of the allocation, points
* to the actual allocation size on success.
*
* This routine attempts to allocate a contiguous kernel buffer up to
*/
return kmalloc(*size, GFP_KERNEL);
}
-
-EXPORT_SYMBOL_GPL(get_mtd_device);
-EXPORT_SYMBOL_GPL(get_mtd_device_nm);
-EXPORT_SYMBOL_GPL(__get_mtd_device);
-EXPORT_SYMBOL_GPL(put_mtd_device);
-EXPORT_SYMBOL_GPL(__put_mtd_device);
-EXPORT_SYMBOL_GPL(register_mtd_user);
-EXPORT_SYMBOL_GPL(unregister_mtd_user);
-EXPORT_SYMBOL_GPL(default_mtd_writev);
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
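
As a usage note (not part of the patch), mtd_kmalloc_up_to() takes the desired size by pointer and may return a smaller contiguous buffer than asked for; a hypothetical caller could look like this:

/* Illustrative only: try to buffer a whole eraseblock, but accept less. */
static void *example_alloc_erase_buf(struct mtd_info *mtd, size_t *len)
{
	*len = mtd->erasesize;			/* ideal size */
	return mtd_kmalloc_up_to(mtd, len);	/* *len is updated to the size granted */
}
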
#ifdef CONFIG_PROC_FS
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&wait_q, &wait);
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
cxt->nextpage = 0;
}
- while (mtd->block_isbad) {
- ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
+ while (mtd_can_have_bb(mtd)) {
+ ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
if (!ret)
break;
if (ret < 0) {
return;
}
- if (mtd->block_markbad && ret == -EIO) {
- ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
+ if (mtd_can_have_bb(mtd) && ret == -EIO) {
+ ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
if (ret < 0) {
printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
return;
hdr[0] = cxt->nextcount;
hdr[1] = MTDOOPS_KERNMSG_MAGIC;
- if (panic)
- ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
- record_size, &retlen, cxt->oops_buf);
- else
- ret = mtd->write(mtd, cxt->nextpage * record_size,
- record_size, &retlen, cxt->oops_buf);
+ if (panic) {
+ ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
+ if (ret == -EOPNOTSUPP) {
+ printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
+ return;
+ }
+ } else
+ ret = mtd_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
if (retlen != record_size || ret < 0)
printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
size_t retlen;
for (page = 0; page < cxt->oops_pages; page++) {
+ if (mtd_can_have_bb(mtd) &&
+ mtd_block_isbad(mtd, page * record_size))
+ continue;
/* Assume the page is used */
mark_page_used(cxt, page);
- ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
- &retlen, (u_char *) &count[0]);
+ ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
+ &retlen, (u_char *)&count[0]);
if (retlen != MTDOOPS_HEADER_SIZE ||
(ret < 0 && !mtd_is_bitflip(ret))) {
printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
/* Panics must be written immediately */
- if (reason != KMSG_DUMP_OOPS) {
- if (!cxt->mtd->panic_write)
- printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
- else
- mtdoops_write(cxt, 1);
- return;
- }
+	if (reason != KMSG_DUMP_OOPS) {
+		mtdoops_write(cxt, 1);
+		return;
+	}
/* For other cases, schedule work to write it "nicely" */
schedule_work(&cxt->work_write);
/* oops_page_used is a bit field */
cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
- BITS_PER_LONG));
+ BITS_PER_LONG) * sizeof(unsigned long));
if (!cxt->oops_page_used) {
printk(KERN_ERR "mtdoops: could not allocate page array\n");
return;
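
The size fix above is plain arithmetic; a sketch, with the helper name invented for illustration:

/*
 * One bit per record page, packed into unsigned longs. For example, with
 * 1024 pages on a 64-bit kernel:
 *   DIV_ROUND_UP(1024, 64) = 16 longs -> 16 * sizeof(unsigned long) = 128 bytes
 * The old call passed only the long count (16) to vmalloc() as the byte count.
 */
static inline size_t oops_page_map_bytes(unsigned long pages)
{
	return DIV_ROUND_UP(pages, BITS_PER_LONG) * sizeof(unsigned long);
}
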
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
- res = part->master->read(part->master, from + part->offset,
- len, retlen, buf);
+ res = mtd_read(part->master, from + part->offset, len, retlen, buf);
if (unlikely(res)) {
if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
- return part->master->point (part->master, from + part->offset,
- len, retlen, virt, phys);
+ return mtd_point(part->master, from + part->offset, len, retlen,
+ virt, phys);
}
static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_part *part = PART(mtd);
- part->master->unpoint(part->master, from + part->offset, len);
+ mtd_unpoint(part->master, from + part->offset, len);
}
static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
struct mtd_part *part = PART(mtd);
offset += part->offset;
- return part->master->get_unmapped_area(part->master, len, offset,
- flags);
+ return mtd_get_unmapped_area(part->master, len, offset, flags);
}
static int part_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- res = part->master->read_oob(part->master, from + part->offset, ops);
+ res = mtd_read_oob(part->master, from + part->offset, ops);
if (unlikely(res)) {
if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected++;
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_user_prot_reg(part->master, from,
- len, retlen, buf);
+ return mtd_read_user_prot_reg(part->master, from, len, retlen, buf);
}
static int part_get_user_prot_info(struct mtd_info *mtd,
struct otp_info *buf, size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_user_prot_info(part->master, buf, len);
+ return mtd_get_user_prot_info(part->master, buf, len);
}
static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_fact_prot_reg(part->master, from,
- len, retlen, buf);
+ return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf);
}
static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_fact_prot_info(part->master, buf, len);
+ return mtd_get_fact_prot_info(part->master, buf, len);
}
static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->write(part->master, to + part->offset,
- len, retlen, buf);
+ return mtd_write(part->master, to + part->offset, len, retlen, buf);
}
static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->panic_write(part->master, to + part->offset,
- len, retlen, buf);
+ return mtd_panic_write(part->master, to + part->offset, len, retlen,
+ buf);
}
static int part_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
if (ops->datbuf && to + ops->len > mtd->size)
return -EINVAL;
- return part->master->write_oob(part->master, to + part->offset, ops);
+ return mtd_write_oob(part->master, to + part->offset, ops);
}
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->write_user_prot_reg(part->master, from,
- len, retlen, buf);
+ return mtd_write_user_prot_reg(part->master, from, len, retlen, buf);
}
static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->lock_user_prot_reg(part->master, from, len);
+ return mtd_lock_user_prot_reg(part->master, from, len);
}
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
struct mtd_part *part = PART(mtd);
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- return part->master->writev(part->master, vecs, count,
- to + part->offset, retlen);
+ return mtd_writev(part->master, vecs, count, to + part->offset,
+ retlen);
}
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
if (instr->addr >= mtd->size)
return -EINVAL;
instr->addr += part->offset;
- ret = part->master->erase(part->master, instr);
+ ret = mtd_erase(part->master, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
- return part->master->lock(part->master, ofs + part->offset, len);
+ return mtd_lock(part->master, ofs + part->offset, len);
}
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
- return part->master->unlock(part->master, ofs + part->offset, len);
+ return mtd_unlock(part->master, ofs + part->offset, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
- return part->master->is_locked(part->master, ofs + part->offset, len);
+ return mtd_is_locked(part->master, ofs + part->offset, len);
}
static void part_sync(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- part->master->sync(part->master);
+ mtd_sync(part->master);
}
static int part_suspend(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- return part->master->suspend(part->master);
+ return mtd_suspend(part->master);
}
static void part_resume(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- part->master->resume(part->master);
+ mtd_resume(part->master);
}
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
if (ofs >= mtd->size)
return -EINVAL;
ofs += part->offset;
- return part->master->block_isbad(part->master, ofs);
+ return mtd_block_isbad(part->master, ofs);
}
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
if (ofs >= mtd->size)
return -EINVAL;
ofs += part->offset;
- res = part->master->block_markbad(part->master, ofs);
+ res = mtd_block_markbad(part->master, ofs);
if (!res)
mtd->ecc_stats.badblocks++;
return res;
uint64_t offs = 0;
while (offs < slave->mtd.size) {
- if (master->block_isbad(master,
- offs + slave->offset))
+ if (mtd_block_isbad(master, offs + slave->offset))
slave->mtd.ecc_stats.badblocks++;
offs += slave->mtd.erasesize;
}
eb->root = NULL;
/* badblocks not supported */
- if (!d->mtd->block_markbad)
+ if (!mtd_can_have_bb(d->mtd))
return 1;
offset = mtdswap_eb_offset(d, eb);
dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
- ret = d->mtd->block_markbad(d->mtd, offset);
+ ret = mtd_block_markbad(d->mtd, offset);
if (ret) {
dev_warn(d->dev, "Mark block bad failed for block at %08llx "
static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
struct mtd_oob_ops *ops)
{
- int ret = d->mtd->read_oob(d->mtd, from, ops);
+ int ret = mtd_read_oob(d->mtd, from, ops);
if (mtd_is_bitflip(ret))
return ret;
offset = mtdswap_eb_offset(d, eb);
/* Check first if the block is bad. */
- if (d->mtd->block_isbad && d->mtd->block_isbad(d->mtd, offset))
+ if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
return MTDSWAP_SCANNED_BAD;
ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
}
- ret = d->mtd->write_oob(d->mtd, offset , &ops);
+ ret = mtd_write_oob(d->mtd, offset, &ops);
if (ret) {
dev_warn(d->dev, "Write OOB failed for block at %08llx "
erase.len = mtd->erasesize;
erase.priv = (u_long)&wq;
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
if (retries++ < MTDSWAP_ERASE_RETRIES) {
dev_warn(d->dev,
return ret;
writepos = (loff_t)*bp << PAGE_SHIFT;
- ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf);
+ ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf);
if (ret == -EIO || mtd_is_eccerr(ret)) {
d->curr_write_pos--;
eb->active_count--;
retries = 0;
retry:
- ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
oldeb = d->eb_data + oldblock / d->pages_per_eblk;
patt = mtdswap_test_patt(test + i);
memset(d->page_buf, patt, mtd->writesize);
memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
- ret = mtd->write_oob(mtd, pos, &ops);
+ ret = mtd_write_oob(mtd, pos, &ops);
if (ret)
goto error;
pos = base;
for (i = 0; i < mtd_pages; i++) {
- ret = mtd->read_oob(mtd, pos, &ops);
+ ret = mtd_read_oob(mtd, pos, &ops);
if (ret)
goto error;
{
struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
- if (d->mtd->sync)
- d->mtd->sync(d->mtd);
+ mtd_sync(d->mtd);
return 0;
}
badcnt = 0;
- if (mtd->block_isbad)
+ if (mtd_can_have_bb(mtd))
for (offset = 0; offset < size; offset += mtd->erasesize)
- if (mtd->block_isbad(mtd, offset))
+ if (mtd_block_isbad(mtd, offset))
badcnt++;
return badcnt;
retries = 0;
retry:
- ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf);
d->mtd_read_count++;
if (mtd_is_bitflip(ret)) {
config MTD_NAND_OMAP2
tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
- depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4)
+ depends on ARCH_OMAP2PLUS
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
platforms.
},
};
-static int __init ams_delta_nand_init(void)
-{
- return platform_driver_register(&ams_delta_nand_driver);
-}
-module_init(ams_delta_nand_init);
-
-static void __exit ams_delta_nand_exit(void)
-{
- platform_driver_unregister(&ams_delta_nand_driver);
-}
-module_exit(ams_delta_nand_exit);
+module_platform_driver(ams_delta_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
.resume = bcm_umi_nand_resume,
};
-static int __init nand_init(void)
-{
- return platform_driver_register(&nand_driver);
-}
-
-static void __exit nand_exit(void)
-{
- platform_driver_unregister(&nand_driver);
-}
-
-module_init(nand_init);
-module_exit(nand_exit);
+module_platform_driver(nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Broadcom");
davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);
- ret = davinci_aemif_setup_timing(info->timing, info->base,
+ ret = 0;
+ if (info->timing)
+ ret = davinci_aemif_setup_timing(info->timing, info->base,
info->core_chipsel);
if (ret < 0) {
dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
size_t retlen;
for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
- ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
if (retlen != mtd->writesize)
continue;
if (ret) {
/* Only one mediaheader was found. We want buf to contain a
mediaheader on return, so we'll have to re-read the one we found. */
offs = doc->mh0_page << this->page_shift;
- ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
if (retlen != mtd->writesize) {
/* Insanity. Give up. */
printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
elbc_fcm_ctrl->page = page_addr;
- out_be32(&lbc->fbar,
- page_addr >> (chip->phys_erase_shift - chip->page_shift));
-
if (priv->page_size) {
+ /*
+		 * Large-page chip: FPAR[PI] holds the lowest 6 bits of the
+		 * page address, FBAR[BLK] holds the remaining bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 6);
out_be32(&lbc->fpar,
((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
(oob ? FPAR_LP_MS : 0) | column);
buf_num = (page_addr & 1) << 2;
} else {
+ /*
+		 * Small-page chip: FPAR[PI] holds the lowest 5 bits of the
+		 * page address, FBAR[BLK] holds the remaining bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 5);
out_be32(&lbc->fpar,
((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
(oob ? FPAR_SP_MS : 0) | column);
fsl_elbc_run_command(mtd);
return;
- /* READID must read all 5 possible bytes while CEB is active */
case NAND_CMD_READID:
- dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
+ case NAND_CMD_PARAM:
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_UA << FIR_OP1_SHIFT) |
(FIR_OP_RBW << FIR_OP2_SHIFT));
- out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
- /* nand_get_flash_type() reads 8 bytes of entire ID string */
- out_be32(&lbc->fbcr, 8);
- elbc_fcm_ctrl->read_bytes = 8;
+ out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
+ /*
+	 * Although READID currently needs only 8 bytes, we always read
+	 * the maximum of 256 bytes (needed for PARAM).
+ */
+ out_be32(&lbc->fbcr, 256);
+ elbc_fcm_ctrl->read_bytes = 256;
elbc_fcm_ctrl->use_mdr = 1;
- elbc_fcm_ctrl->mdr = 0;
-
+ elbc_fcm_ctrl->mdr = column;
set_addr(mtd, 0, 0, 0);
fsl_elbc_run_command(mtd);
return;
page_addr, column);
elbc_fcm_ctrl->column = column;
- elbc_fcm_ctrl->oob = 0;
elbc_fcm_ctrl->use_mdr = 1;
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ elbc_fcm_ctrl->oob = 1;
+ } else {
+ WARN_ON(column != 0);
+ elbc_fcm_ctrl->oob = 0;
+ }
+
fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
(NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
(NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
(FIR_OP_CW1 << FIR_OP6_SHIFT) |
(FIR_OP_RS << FIR_OP7_SHIFT));
- if (column >= mtd->writesize) {
+ if (elbc_fcm_ctrl->oob)
/* OOB area --> READOOB */
- column -= mtd->writesize;
fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
- elbc_fcm_ctrl->oob = 1;
- } else {
- WARN_ON(column != 0);
+ else
/* First 256 bytes --> READ0 */
fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
- }
}
out_be32(&lbc->fcr, fcr);
*/
if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
- out_be32(&lbc->fbcr, elbc_fcm_ctrl->index);
+ out_be32(&lbc->fbcr,
+ elbc_fcm_ctrl->index - elbc_fcm_ctrl->column);
else
out_be32(&lbc->fbcr, 0);
if (chip->pagemask & 0xff000000)
al++;
- /* add to ECCM mode set in fsl_elbc_init */
- priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */
- (al << FMR_AL_SHIFT);
+ priv->fmr |= al << FMR_AL_SHIFT;
dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
chip->numchips);
priv->mtd.priv = chip;
priv->mtd.owner = THIS_MODULE;
- /* Set the ECCM according to the settings in bootloader.*/
- priv->fmr = in_be32(&lbc->fmr) & FMR_ECCM;
+ /* set timeout to maximum */
+ priv->fmr = 15 << FMR_CWTO_SHIFT;
+ if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
+ priv->fmr |= FMR_ECCM;
/* fill in nand_chip structure */
/* set up function call table */
.remove = fsl_elbc_nand_remove,
};
-static int __init fsl_elbc_nand_init(void)
-{
- return platform_driver_register(&fsl_elbc_nand_driver);
-}
-
-static void __exit fsl_elbc_nand_exit(void)
-{
- platform_driver_unregister(&fsl_elbc_nand_driver);
-}
-
-module_init(fsl_elbc_nand_init);
-module_exit(fsl_elbc_nand_exit);
+module_platform_driver(fsl_elbc_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale");
.remove = __devexit_p(fun_remove),
};
-static int __init fun_module_init(void)
-{
- return platform_driver_register(&of_fun_driver);
-}
-module_init(fun_module_init);
-
-static void __exit fun_module_exit(void)
-{
- platform_driver_unregister(&of_fun_driver);
-}
-module_exit(fun_module_exit);
+module_platform_driver(of_fun_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
struct gpiomtd {
void __iomem *io_sync;
return gpio_get_value(gpiomtd->plat.gpio_rdy);
}
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_id_table[] = {
+ { .compatible = "gpio-control-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
+
+static int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ u32 val;
+
+ if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
+ if (val == 2) {
+ plat->options |= NAND_BUSWIDTH_16;
+ } else if (val != 1) {
+ dev_err(dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+
+ plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
+ plat->gpio_nce = of_get_gpio(dev->of_node, 1);
+ plat->gpio_ale = of_get_gpio(dev->of_node, 2);
+ plat->gpio_cle = of_get_gpio(dev->of_node, 3);
+ plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
+
+ if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
+ plat->chip_delay = val;
+
+ return 0;
+}
+
+static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ struct resource *r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+ u64 addr;
+
+ if (!r || of_property_read_u64(pdev->dev.of_node,
+ "gpio-control-nand,io-sync-reg", &addr))
+ return NULL;
+
+ r->start = addr;
+ r->end = r->start + 0x3;
+ r->flags = IORESOURCE_MEM;
+
+ return r;
+}
+#else /* CONFIG_OF */
+#define gpio_nand_id_table NULL
+static inline int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ return -ENOSYS;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static inline int gpio_nand_get_config(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ int ret = gpio_nand_get_config_of(dev, plat);
+
+ if (!ret)
+ return ret;
+
+ if (dev->platform_data) {
+ memcpy(plat, dev->platform_data, sizeof(*plat));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync(struct platform_device *pdev)
+{
+ struct resource *r = gpio_nand_get_io_sync_of(pdev);
+
+ if (r)
+ return r;
+
+ return platform_get_resource(pdev, IORESOURCE_MEM, 1);
+}
+
static int __devexit gpio_nand_remove(struct platform_device *dev)
{
struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
nand_release(&gpiomtd->mtd_info);
- res = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ res = gpio_nand_get_io_sync(dev);
iounmap(gpiomtd->io_sync);
if (res)
release_mem_region(res->start, resource_size(res));
struct gpiomtd *gpiomtd;
struct nand_chip *this;
struct resource *res0, *res1;
- int ret;
+ struct mtd_part_parser_data ppdata = {};
+ int ret = 0;
- if (!dev->dev.platform_data)
+ if (!dev->dev.of_node && !dev->dev.platform_data)
return -EINVAL;
res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
goto err_map;
}
- res1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ res1 = gpio_nand_get_io_sync(dev);
if (res1) {
gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
if (!gpiomtd->io_sync) {
}
}
- memcpy(&gpiomtd->plat, dev->dev.platform_data, sizeof(gpiomtd->plat));
+ ret = gpio_nand_get_config(&dev->dev, &gpiomtd->plat);
+ if (ret)
+ goto err_nce;
ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
if (ret)
gpiomtd->plat.adjust_parts(&gpiomtd->plat,
gpiomtd->mtd_info.size);
- mtd_device_register(&gpiomtd->mtd_info, gpiomtd->plat.parts,
- gpiomtd->plat.num_parts);
+ ppdata.of_node = dev->dev.of_node;
+ ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata,
+ gpiomtd->plat.parts,
+ gpiomtd->plat.num_parts);
+ if (ret)
+ goto err_wp;
platform_set_drvdata(dev, gpiomtd);
return 0;
.remove = gpio_nand_remove,
.driver = {
.name = "gpio-nand",
+ .of_match_table = gpio_nand_id_table,
},
};
},
};
-static int __init jz_nand_init(void)
-{
- return platform_driver_register(&jz_nand_driver);
-}
-module_init(jz_nand_init);
-
-static void __exit jz_nand_exit(void)
-{
- platform_driver_unregister(&jz_nand_driver);
-}
-module_exit(jz_nand_exit);
+module_platform_driver(jz_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
},
};
-static int __init mpc5121_nfc_init(void)
-{
- return platform_driver_register(&mpc5121_nfc_driver);
-}
-
-module_init(mpc5121_nfc_init);
-
-static void __exit mpc5121_nfc_cleanup(void)
-{
- platform_driver_unregister(&mpc5121_nfc_driver);
-}
-
-module_exit(mpc5121_nfc_cleanup);
+module_platform_driver(mpc5121_nfc_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
* Bad block marker is stored in the last page of each block
* on Samsung and Hynix MLC devices; stored in first two pages
* of each block on Micron devices with 2KiB pages and on
- * SLC Samsung, Hynix, Toshiba and AMD/Spansion. All others scan
- * only the first page.
+ * SLC Samsung, Hynix, Toshiba, AMD/Spansion, and Macronix.
+ * All others scan only the first page.
*/
if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
(*maf_id == NAND_MFR_SAMSUNG ||
(*maf_id == NAND_MFR_SAMSUNG ||
*maf_id == NAND_MFR_HYNIX ||
*maf_id == NAND_MFR_TOSHIBA ||
- *maf_id == NAND_MFR_AMD)) ||
+ *maf_id == NAND_MFR_AMD ||
+ *maf_id == NAND_MFR_MACRONIX)) ||
(mtd->writesize == 2048 &&
*maf_id == NAND_MFR_MICRON))
chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
from += marker_len;
marker_len = 0;
}
- res = mtd->read(mtd, from, len, &retlen, buf);
+ res = mtd_read(mtd, from, len, &retlen, buf);
if (res < 0) {
if (mtd_is_eccerr(res)) {
pr_info("nand_bbt: ECC error in BBT at "
if (td->options & NAND_BBT_VERSION)
len++;
- return mtd->read(mtd, offs, len, &retlen, buf);
+ return mtd_read(mtd, offs, len, &retlen, buf);
}
/* Scan read raw data from flash */
ops.len = min(len, (size_t)mtd->writesize);
ops.oobbuf = buf + ops.len;
- res = mtd->read_oob(mtd, offs, &ops);
+ res = mtd_read_oob(mtd, offs, &ops);
if (res)
return res;
ops.oobbuf = oob;
ops.len = len;
- return mtd->write_oob(mtd, offs, &ops);
+ return mtd_write_oob(mtd, offs, &ops);
}
static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
* Read the full oob until read_oob is fixed to handle single
* byte reads for 16 bit buswidth.
*/
- ret = mtd->read_oob(mtd, offs, &ops);
+ ret = mtd_read_oob(mtd, offs, &ops);
/* Ignore ECC errors when checking for BBM */
if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
/* Make it block aligned */
to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
len = 1 << this->bbt_erase_shift;
- res = mtd->read(mtd, to, len, &retlen, buf);
+ res = mtd_read(mtd, to, len, &retlen, buf);
if (res < 0) {
if (retlen != len) {
pr_info("nand_bbt: error reading block "
/* Read oob data */
ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
ops.oobbuf = &buf[len];
- res = mtd->read_oob(mtd, to + mtd->writesize, &ops);
+ res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
if (res < 0 || ops.oobretlen != ops.ooblen)
goto outerr;
#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
- /*512 Megabit */
+ /* 512 Megabit */
{"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS},
+ {"NAND 64MiB 3,3V 8-bit", 0xF0, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16},
{"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16},
{"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16},
{NAND_MFR_HYNIX, "Hynix"},
{NAND_MFR_MICRON, "Micron"},
{NAND_MFR_AMD, "AMD"},
+ {NAND_MFR_MACRONIX, "Macronix"},
{0x0, "Unknown"}
};
return -EINVAL;
}
offset = erase_block_no * ns->geom.secsz;
- if (mtd->block_markbad(mtd, offset)) {
+ if (mtd_block_markbad(mtd, offset)) {
NS_ERR("invalid badblocks.\n");
return -EINVAL;
}
.remove = __devexit_p(ndfc_remove),
};
-static int __init ndfc_nand_init(void)
-{
- return platform_driver_register(&ndfc_driver);
-}
-
-static void __exit ndfc_nand_exit(void)
-{
- platform_driver_unregister(&ndfc_driver);
-}
-
-module_init(ndfc_nand_init);
-module_exit(ndfc_nand_exit);
+module_platform_driver(ndfc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
struct nomadik_nand_host *host = dev_get_drvdata(dev);
int ret = 0;
if (host)
- ret = host->mtd.suspend(&host->mtd);
+ ret = mtd_suspend(&host->mtd);
return ret;
}
{
struct nomadik_nand_host *host = dev_get_drvdata(dev);
if (host)
- host->mtd.resume(&host->mtd);
+ mtd_resume(&host->mtd);
return 0;
}
},
};
-static int __init nand_nomadik_init(void)
-{
- pr_info("Nomadik NAND driver\n");
- return platform_driver_register(&nomadik_nand_driver);
-}
-
-static void __exit nand_nomadik_exit(void)
-{
- platform_driver_unregister(&nomadik_nand_driver);
-}
-
-module_init(nand_nomadik_init);
-module_exit(nand_nomadik_exit);
+module_platform_driver(nomadik_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
},
};
-static int __init nuc900_nand_init(void)
-{
- return platform_driver_register(&nuc900_nand_driver);
-}
-
-static void __exit nuc900_nand_exit(void)
-{
- platform_driver_unregister(&nuc900_nand_driver);
-}
-
-module_init(nuc900_nand_init);
-module_exit(nuc900_nand_exit);
+module_platform_driver(nuc900_nand_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
},
};
-static int __init omap_nand_init(void)
-{
- pr_info("%s driver initializing\n", DRIVER_NAME);
-
- return platform_driver_register(&omap_nand_driver);
-}
-
-static void __exit omap_nand_exit(void)
-{
- platform_driver_unregister(&omap_nand_driver);
-}
-
-module_init(omap_nand_init);
-module_exit(omap_nand_exit);
+module_platform_driver(omap_nand_driver);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
.remove = pasemi_nand_remove,
};
-static int __init pasemi_nand_init(void)
-{
- return platform_driver_register(&pasemi_nand_driver);
-}
-module_init(pasemi_nand_init);
-
-static void __exit pasemi_nand_exit(void)
-{
- platform_driver_unregister(&pasemi_nand_driver);
-}
-module_exit(pasemi_nand_exit);
+module_platform_driver(pasemi_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
},
};
-static int __init plat_nand_init(void)
-{
- return platform_driver_register(&plat_nand_driver);
-}
-
-static void __exit plat_nand_exit(void)
-{
- platform_driver_unregister(&plat_nand_driver);
-}
-
-module_init(plat_nand_init);
-module_exit(plat_nand_exit);
+module_platform_driver(plat_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool");
for (cs = 0; cs < pdata->num_cs; cs++) {
mtd = info->host[cs]->mtd;
- mtd->suspend(mtd);
+ mtd_suspend(mtd);
}
return 0;
nand_writel(info, NDSR, NDSR_MASK);
for (cs = 0; cs < pdata->num_cs; cs++) {
mtd = info->host[cs]->mtd;
- mtd->resume(mtd);
+ mtd_resume(mtd);
}
return 0;
.resume = pxa3xx_nand_resume,
};
-static int __init pxa3xx_nand_init(void)
-{
- return platform_driver_register(&pxa3xx_nand_driver);
-}
-module_init(pxa3xx_nand_init);
-
-static void __exit pxa3xx_nand_exit(void)
-{
- platform_driver_unregister(&pxa3xx_nand_driver);
-}
-module_exit(pxa3xx_nand_exit);
+module_platform_driver(pxa3xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");
.remove = __devexit_p(sharpsl_nand_remove),
};
-static int __init sharpsl_nand_init(void)
-{
- return platform_driver_register(&sharpsl_nand_driver);
-}
-module_init(sharpsl_nand_init);
-
-static void __exit sharpsl_nand_exit(void)
-{
- platform_driver_unregister(&sharpsl_nand_driver);
-}
-module_exit(sharpsl_nand_exit);
+module_platform_driver(sharpsl_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
ops.datbuf = NULL;
- ret = mtd->write_oob(mtd, ofs, &ops);
+ ret = mtd_write_oob(mtd, ofs, &ops);
if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
printk(KERN_NOTICE
"sm_common: can't mark sector at %i as bad\n",
.remove = __devexit_p(socrates_nand_remove),
};
-static int __init socrates_nand_init(void)
-{
- return platform_driver_register(&socrates_nand_driver);
-}
-
-static void __exit socrates_nand_exit(void)
-{
- platform_driver_unregister(&socrates_nand_driver);
-}
-
-module_init(socrates_nand_init);
-module_exit(socrates_nand_exit);
+module_platform_driver(socrates_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ilya Yanok");
.resume = tmio_resume,
};
-static int __init tmio_init(void)
-{
- return platform_driver_register(&tmio_driver);
-}
-
-static void __exit tmio_exit(void)
-{
- platform_driver_unregister(&tmio_driver);
-}
-
-module_init(tmio_init);
-module_exit(tmio_exit);
+module_platform_driver(tmio_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- if (!devm_request_mem_region(&dev->dev, res->start,
- resource_size(res), dev_name(&dev->dev)))
- return -EBUSY;
- drvdata->base = devm_ioremap(&dev->dev, res->start,
- resource_size(res));
+ drvdata->base = devm_request_and_ioremap(&dev->dev, res);
if (!drvdata->base)
return -EBUSY;
if (memcmp(mtd->name, "DiskOnChip", 10))
return;
- if (!mtd->block_isbad) {
+ if (!mtd_can_have_bb(mtd)) {
printk(KERN_ERR
"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
"Please use the new diskonchip driver under the NAND subsystem.\n");
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->read_oob(mtd, offs & ~mask, &ops);
+ res = mtd_read_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->write_oob(mtd, offs & ~mask, &ops);
+ res = mtd_write_oob(mtd, offs & ~mask, &