[MTD] Unlocking all Intel flash that is locked on power up.
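
Intel parts that advertise instant individual block locking (extended-query
FeatureSupport bit 5) come out of power-up with all sectors locked, so this
change marks them with MTD_POWERUP_LOCK, records the lock state of every
erase block in a per-region bitmap (lockmap) before suspend, and re-unlocks
only the previously unlocked blocks on resume.  The standalone sketch below
is an illustration of that bookkeeping, not driver code: it is plain
userspace C, and hw_block_locked()/hw_unlock_block() are hypothetical
stand-ins for the real lock-status read and unlock commands.

/* Illustrative sketch of the save/restore scheme, not part of the patch. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_BLOCKS    32
#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

static unsigned long lockmap[(NUM_BLOCKS + BITS_PER_WORD - 1) / BITS_PER_WORD];
static bool hw_locked[NUM_BLOCKS];              /* mock flash lock state */

static bool hw_block_locked(int blk) { return hw_locked[blk]; }
static void hw_unlock_block(int blk) { hw_locked[blk] = false; }

/* Before suspend: remember which blocks are currently locked. */
static void save_locks(void)
{
	for (int blk = 0; blk < NUM_BLOCKS; blk++) {
		if (hw_block_locked(blk))
			lockmap[blk / BITS_PER_WORD] |=  1UL << (blk % BITS_PER_WORD);
		else
			lockmap[blk / BITS_PER_WORD] &= ~(1UL << (blk % BITS_PER_WORD));
	}
}

/* After resume the chip is fully locked again, so unlock only the blocks
 * that were unlocked before suspend. */
static void restore_locks(void)
{
	for (int blk = 0; blk < NUM_BLOCKS; blk++)
		if (!(lockmap[blk / BITS_PER_WORD] & (1UL << (blk % BITS_PER_WORD))))
			hw_unlock_block(blk);
}

int main(void)
{
	hw_locked[0] = true;                    /* e.g. a locked bootloader block */
	save_locks();

	for (int blk = 0; blk < NUM_BLOCKS; blk++)
		hw_locked[blk] = true;          /* power cycle: everything locked */

	restore_locks();
	printf("block 0 locked: %d, block 1 locked: %d\n",
	       hw_locked[0], hw_locked[1]);     /* prints 1 and 0 */
	return 0;
}
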
[deliverable/linux.git] drivers/mtd/chips/cfi_cmdset_0001.c
index f334959a335ba0fc3bcb90f5042be352f871a14b..8189adfefaef5db6c26b81ae4b329d9fcee1ff16 100644 (file)
@@ -15,6 +15,8 @@
  *     - optimized write buffer method
  * 02/05/2002  Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
  *     - reworked lock/unlock/erase support for var size flash
+ * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
+ *     - auto unlock sectors on resume for auto locking flash on power up
  */
 
 #include <linux/module.h>
@@ -30,6 +32,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/reboot.h>
+#include <linux/bitmap.h>
 #include <linux/mtd/xip.h>
 #include <linux/mtd/map.h>
 #include <linux/mtd/mtd.h>
@@ -47,6 +50,7 @@
 #define I82802AC       0x00ac
 #define MANUFACTURER_ST         0x0020
 #define M50LPW080       0x002F
+#define AT49BV640D     0x02de
 
 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -82,6 +86,7 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);
 
+static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
 #include "fwh_lock.h"
@@ -153,6 +158,47 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
 }
 #endif
 
+/* Atmel chips don't use the same PRI format as Intel chips */
+static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+{
+       struct map_info *map = mtd->priv;
+       struct cfi_private *cfi = map->fldrv_priv;
+       struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+       struct cfi_pri_atmel atmel_pri;
+       uint32_t features = 0;
+
+       /* Reverse byteswapping */
+       extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
+       extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
+       extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
+
+       memcpy(&atmel_pri, extp, sizeof(atmel_pri));
+       memset((char *)extp + 5, 0, sizeof(*extp) - 5);
+
+       printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
+
+       if (atmel_pri.Features & 0x01) /* chip erase supported */
+               features |= (1<<0);
+       if (atmel_pri.Features & 0x02) /* erase suspend supported */
+               features |= (1<<1);
+       if (atmel_pri.Features & 0x04) /* program suspend supported */
+               features |= (1<<2);
+       if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
+               features |= (1<<9);
+       if (atmel_pri.Features & 0x20) /* page mode read supported */
+               features |= (1<<7);
+       if (atmel_pri.Features & 0x40) /* queued erase supported */
+               features |= (1<<4);
+       if (atmel_pri.Features & 0x80) /* Protection bits supported */
+               features |= (1<<6);
+
+       extp->FeatureSupport = features;
+
+       /* burst write mode not supported */
+       cfi->cfiq->BufWriteTimeoutTyp = 0;
+       cfi->cfiq->BufWriteTimeoutMax = 0;
+}
+
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
@@ -220,7 +266,23 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
        }
 }
 
+/*
+ * Some chips power-up with all sectors locked by default.
+ */
+static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
+{
+       struct map_info *map = mtd->priv;
+       struct cfi_private *cfi = map->fldrv_priv;
+       struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+       if (cfip->FeatureSupport&32) {
+               printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
+               mtd->flags |= MTD_POWERUP_LOCK;
+       }
+}
+
 static struct cfi_fixup cfi_fixup_table[] = {
+       { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
 #endif
@@ -232,6 +294,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
 #endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
+       { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
 };
 
@@ -263,7 +326,7 @@ read_pri_intelext(struct map_info *map, __u16 adr)
                return NULL;
 
        if (extp->MajorVersion != '1' ||
-           (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+           (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
@@ -460,6 +523,7 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
+                       mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }
@@ -512,7 +576,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
 
        /*
-        * Probing of multi-partition flash ships.
+        * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
@@ -627,73 +691,13 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
 /*
  *  *********** CHIP ACCESS FUNCTIONS ***********
  */
-
-static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
+static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
 {
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
-       unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
-
- resettime:
-       timeo = jiffies + HZ;
- retry:
-       if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
-               /*
-                * OK. We have possibility for contension on the write/erase
-                * operations which are global to the real chip and not per
-                * partition.  So let's fight it over in the partition which
-                * currently has authority on the operation.
-                *
-                * The rules are as follows:
-                *
-                * - any write operation must own shared->writing.
-                *
-                * - any erase operation must own _both_ shared->writing and
-                *   shared->erasing.
-                *
-                * - contension arbitration is handled in the owner's context.
-                *
-                * The 'shared' struct can be read and/or written only when
-                * its lock is taken.
-                */
-               struct flchip_shared *shared = chip->priv;
-               struct flchip *contender;
-               spin_lock(&shared->lock);
-               contender = shared->writing;
-               if (contender && contender != chip) {
-                       /*
-                        * The engine to perform desired operation on this
-                        * partition is already in use by someone else.
-                        * Let's fight over it in the context of the chip
-                        * currently using it.  If it is possible to suspend,
-                        * that other partition will do just that, otherwise
-                        * it'll happily send us to sleep.  In any case, when
-                        * get_chip returns success we're clear to go ahead.
-                        */
-                       int ret = spin_trylock(contender->mutex);
-                       spin_unlock(&shared->lock);
-                       if (!ret)
-                               goto retry;
-                       spin_unlock(chip->mutex);
-                       ret = get_chip(map, contender, contender->start, mode);
-                       spin_lock(chip->mutex);
-                       if (ret) {
-                               spin_unlock(contender->mutex);
-                               return ret;
-                       }
-                       timeo = jiffies + HZ;
-                       spin_lock(&shared->lock);
-                       spin_unlock(contender->mutex);
-               }
-
-               /* We now own it */
-               shared->writing = chip;
-               if (mode == FL_ERASING)
-                       shared->erasing = chip;
-               spin_unlock(&shared->lock);
-       }
+       unsigned long timeo = jiffies + HZ;
 
        switch (chip->state) {
 
@@ -708,16 +712,11 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;
 
-                       if (time_after(jiffies, timeo)) {
-                               printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
-                                      map->name, status.x[0]);
-                               return -EIO;
-                       }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
-                       goto retry;
+                       return -EAGAIN;
                }
 
        case FL_READY:
@@ -784,6 +783,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
 
+       case FL_SHUTDOWN:
+               /* The machine is rebooting now, so no one can get the chip anymore */
+               return -EIO;
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
@@ -792,8 +794,95 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
-               goto resettime;
+               return -EAGAIN;
+       }
+}
+
+static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
+{
+       int ret;
+       DECLARE_WAITQUEUE(wait, current);
+
+ retry:
+       if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
+                          || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
+               /*
+                * OK. We have possibility for contention on the write/erase
+                * operations which are global to the real chip and not per
+                * partition.  So let's fight it over in the partition which
+                * currently has authority on the operation.
+                *
+                * The rules are as follows:
+                *
+                * - any write operation must own shared->writing.
+                *
+                * - any erase operation must own _both_ shared->writing and
+                *   shared->erasing.
+                *
+                * - contention arbitration is handled in the owner's context.
+                *
+                * The 'shared' struct can be read and/or written only when
+                * its lock is taken.
+                */
+               struct flchip_shared *shared = chip->priv;
+               struct flchip *contender;
+               spin_lock(&shared->lock);
+               contender = shared->writing;
+               if (contender && contender != chip) {
+                       /*
+                        * The engine to perform desired operation on this
+                        * partition is already in use by someone else.
+                        * Let's fight over it in the context of the chip
+                        * currently using it.  If it is possible to suspend,
+                        * that other partition will do just that, otherwise
+                        * it'll happily send us to sleep.  In any case, when
+                        * get_chip returns success we're clear to go ahead.
+                        */
+                       ret = spin_trylock(contender->mutex);
+                       spin_unlock(&shared->lock);
+                       if (!ret)
+                               goto retry;
+                       spin_unlock(chip->mutex);
+                       ret = chip_ready(map, contender, contender->start, mode);
+                       spin_lock(chip->mutex);
+
+                       if (ret == -EAGAIN) {
+                               spin_unlock(contender->mutex);
+                               goto retry;
+                       }
+                       if (ret) {
+                               spin_unlock(contender->mutex);
+                               return ret;
+                       }
+                       spin_lock(&shared->lock);
+                       spin_unlock(contender->mutex);
+               }
+
+               /* Check if we already have a suspended erase
+                * on this chip. If so, sleep. */
+               if (mode == FL_ERASING && shared->erasing
+                   && shared->erasing->oldstate == FL_ERASING) {
+                       spin_unlock(&shared->lock);
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&chip->wq, &wait);
+                       spin_unlock(chip->mutex);
+                       schedule();
+                       remove_wait_queue(&chip->wq, &wait);
+                       spin_lock(chip->mutex);
+                       goto retry;
+               }
+
+               /* We now own it */
+               shared->writing = chip;
+               if (mode == FL_ERASING)
+                       shared->erasing = chip;
+               spin_unlock(&shared->lock);
        }
+       ret = chip_ready(map, chip, adr, mode);
+       if (ret == -EAGAIN)
+               goto retry;
+
+       return ret;
 }
 
 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
@@ -1152,28 +1241,34 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si
 {
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
-       unsigned long ofs;
+       unsigned long ofs, last_end = 0;
        int chipnum;
        int ret = 0;
 
        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;
 
-       *mtdbuf = (void *)map->virt + from;
-       *retlen = 0;
-
        /* Now lock the chip(s) to POINT state */
 
        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);
 
+       *mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
+       *retlen = 0;
+
        while (len) {
                unsigned long thislen;
 
                if (chipnum >= cfi->numchips)
                        break;
 
+               /* We cannot point across chips that are virtually disjoint */
+               if (!last_end)
+                       last_end = cfi->chips[chipnum].start;
+               else if (cfi->chips[chipnum].start != last_end)
+                       break;
+
                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
@@ -1187,6 +1282,7 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si
                len -= thislen;
 
                ofs = 0;
+               last_end += 1 << cfi->chipshift;
                chipnum++;
        }
        return 0;
@@ -1766,7 +1862,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
        return ret;
 }
 
-int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
+static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
 {
        unsigned long ofs, len;
        int ret;
@@ -1825,8 +1921,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
        }
 }
 
-#ifdef DEBUG_LOCK_BITS
-static int __xipram do_printlockstatus_oneblock(struct map_info *map,
+static int __xipram do_getlockstatus_oneblock(struct map_info *map,
                                                struct flchip *chip,
                                                unsigned long adr,
                                                int len, void *thunk)
@@ -1840,8 +1935,17 @@ static int __xipram do_printlockstatus_oneblock(struct map_info *map,
        chip->state = FL_JEDEC_QUERY;
        status = cfi_read_query(map, adr+(2*ofs_factor));
        xip_enable(map, chip, 0);
+       return status;
+}
+
+#ifdef DEBUG_LOCK_BITS
+static int __xipram do_printlockstatus_oneblock(struct map_info *map,
+                                               struct flchip *chip,
+                                               unsigned long adr,
+                                               int len, void *thunk)
+{
        printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
-              adr, status);
+              adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
        return 0;
 }
 #endif
@@ -1908,7 +2012,7 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
               __FUNCTION__, ofs, len);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
-               ofs, len, 0);
+               ofs, len, NULL);
 #endif
 
        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
@@ -1918,7 +2022,7 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
               __FUNCTION__, ret);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
-               ofs, len, 0);
+               ofs, len, NULL);
 #endif
 
        return ret;
@@ -1932,7 +2036,7 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
               __FUNCTION__, ofs, len);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
-               ofs, len, 0);
+               ofs, len, NULL);
 #endif
 
        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
@@ -1942,7 +2046,7 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
               __FUNCTION__, ret);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
-               ofs, len, 0);
+               ofs, len, NULL);
 #endif
 
        return ret;
@@ -2216,14 +2320,45 @@ static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
 
 #endif
 
+static void cfi_intelext_save_locks(struct mtd_info *mtd)
+{
+       struct mtd_erase_region_info *region;
+       int block, status, i;
+       unsigned long adr;
+       size_t len;
+
+       for (i = 0; i < mtd->numeraseregions; i++) {
+               region = &mtd->eraseregions[i];
+               if (!region->lockmap)
+                       continue;
+
+               for (block = 0; block < region->numblocks; block++){
+                       len = region->erasesize;
+                       adr = region->offset + block * len;
+
+                       status = cfi_varsize_frob(mtd,
+                                       do_getlockstatus_oneblock, adr, len, NULL);
+                       if (status)
+                               set_bit(block, region->lockmap);
+                       else
+                               clear_bit(block, region->lockmap);
+               }
+       }
+}
+
 static int cfi_intelext_suspend(struct mtd_info *mtd)
 {
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
+       struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        int i;
        struct flchip *chip;
        int ret = 0;
 
+       if ((mtd->flags & MTD_POWERUP_LOCK)
+           && extp && (extp->FeatureSupport & (1 << 5)))
+               cfi_intelext_save_locks(mtd);
+
        for (i=0; !ret && i<cfi->numchips; i++) {
                chip = &cfi->chips[i];
 
@@ -2285,10 +2420,33 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
        return ret;
 }
 
+static void cfi_intelext_restore_locks(struct mtd_info *mtd)
+{
+       struct mtd_erase_region_info *region;
+       int block, i;
+       unsigned long adr;
+       size_t len;
+
+       for (i = 0; i < mtd->numeraseregions; i++) {
+               region = &mtd->eraseregions[i];
+               if (!region->lockmap)
+                       continue;
+
+               for (block = 0; block < region->numblocks; block++) {
+                       len = region->erasesize;
+                       adr = region->offset + block * len;
+
+                       if (!test_bit(block, region->lockmap))
+                               cfi_intelext_unlock(mtd, adr, len);
+               }
+       }
+}
+
 static void cfi_intelext_resume(struct mtd_info *mtd)
 {
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
+       struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        int i;
        struct flchip *chip;
 
@@ -2307,6 +2465,10 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
 
                spin_unlock(chip->mutex);
        }
+
+       if ((mtd->flags & MTD_POWERUP_LOCK)
+           && extp && (extp->FeatureSupport & (1 << 5)))
+               cfi_intelext_restore_locks(mtd);
 }
 
 static int cfi_intelext_reset(struct mtd_info *mtd)
@@ -2322,10 +2484,10 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
                   and switch to array mode so any bootloader in
                   flash is accessible for soft reboot. */
                spin_lock(chip->mutex);
-               ret = get_chip(map, chip, chip->start, FL_SYNCING);
+               ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
                if (!ret) {
                        map_write(map, CMD(0xff), chip->start);
-                       chip->state = FL_READY;
+                       chip->state = FL_SHUTDOWN;
                }
                spin_unlock(chip->mutex);
        }
@@ -2347,12 +2509,19 @@ static void cfi_intelext_destroy(struct mtd_info *mtd)
 {
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
+       struct mtd_erase_region_info *region;
+       int i;
        cfi_intelext_reset(mtd);
        unregister_reboot_notifier(&mtd->reboot_notifier);
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        kfree(cfi->chips[0].priv);
        kfree(cfi);
+       for (i = 0; i < mtd->numeraseregions; i++) {
+               region = &mtd->eraseregions[i];
+               if (region->lockmap)
+                       kfree(region->lockmap);
+       }
        kfree(mtd->eraseregions);
 }
 