[MTD] ESB2 check for closed ROM window
deliverable/linux.git: drivers/mtd/chips/cfi_cmdset_0002.c
1/*
2 * Common Flash Interface support:
3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
4 *
5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
8 *
9 * 2_by_8 routines added by Simon Munton
10 *
11 * 4_by_16 work by Carolyn J. Smith
12 *
13 * XIP support hooks by Vitaly Wool (based on code for Intel flash
14 * by Nicolas Pitre)
15 *
16 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
17 *
18 * This code is GPL
19 *
20 * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/kernel.h>
27#include <linux/sched.h>
28#include <linux/init.h>
29#include <asm/io.h>
30#include <asm/byteorder.h>
31
32#include <linux/errno.h>
33#include <linux/slab.h>
34#include <linux/delay.h>
35#include <linux/interrupt.h>
36#include <linux/mtd/compatmac.h>
37#include <linux/mtd/map.h>
38#include <linux/mtd/mtd.h>
39#include <linux/mtd/cfi.h>
40#include <linux/mtd/xip.h>
41
42#define AMD_BOOTLOC_BUG
43#define FORCE_WORD_WRITE 0
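/*
 * AMD_BOOTLOC_BUG enables the fixup below that second-guesses the boot
 * block location advertised by pre-1.1 CFI tables.  Setting
 * FORCE_WORD_WRITE to 1 skips the buffered-write fixup and forces
 * single-word programming.
 */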
44
45#define MAX_WORD_RETRIES 3
46
47#define MANUFACTURER_AMD 0x0001
48#define MANUFACTURER_ATMEL 0x001F
49#define MANUFACTURER_SST 0x00BF
50#define SST49LF004B 0x0060
51#define SST49LF040B 0x0050
52#define SST49LF008A 0x005a
53#define AT49BV6416 0x00d6
54
55static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
56static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
57static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
58static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
59static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
60static void cfi_amdstd_sync (struct mtd_info *);
61static int cfi_amdstd_suspend (struct mtd_info *);
62static void cfi_amdstd_resume (struct mtd_info *);
63static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
64
65static void cfi_amdstd_destroy(struct mtd_info *);
66
67struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
68static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
69
70static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
71static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
72#include "fwh_lock.h"
73
74static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
75static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
76
77static struct mtd_chip_driver cfi_amdstd_chipdrv = {
78 .probe = NULL, /* Not usable directly */
79 .destroy = cfi_amdstd_destroy,
80 .name = "cfi_cmdset_0002",
81 .module = THIS_MODULE
82};
83
84
85/* #define DEBUG_CFI_FEATURES */
86
87
88#ifdef DEBUG_CFI_FEATURES
89static void cfi_tell_features(struct cfi_pri_amdstd *extp)
90{
91 const char* erase_suspend[3] = {
92 "Not supported", "Read only", "Read/write"
93 };
94 const char* top_bottom[6] = {
95 "No WP", "8x8KiB sectors at top & bottom, no WP",
96 "Bottom boot", "Top boot",
97 "Uniform, Bottom WP", "Uniform, Top WP"
98 };
99
100 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
101 printk(" Address sensitive unlock: %s\n",
102 (extp->SiliconRevision & 1) ? "Not required" : "Required");
103
104 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
105 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
106 else
107 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
108
109 if (extp->BlkProt == 0)
110 printk(" Block protection: Not supported\n");
111 else
112 printk(" Block protection: %d sectors per group\n", extp->BlkProt);
113
114
115 printk(" Temporary block unprotect: %s\n",
116 extp->TmpBlkUnprotect ? "Supported" : "Not supported");
117 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
118 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
119 printk(" Burst mode: %s\n",
120 extp->BurstMode ? "Supported" : "Not supported");
121 if (extp->PageMode == 0)
122 printk(" Page mode: Not supported\n");
123 else
124 printk(" Page mode: %d word page\n", extp->PageMode << 2);
125
126 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
127 extp->VppMin >> 4, extp->VppMin & 0xf);
128 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
129 extp->VppMax >> 4, extp->VppMax & 0xf);
130
131 if (extp->TopBottom < ARRAY_SIZE(top_bottom))
132 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
133 else
134 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
135}
136#endif
137
138#ifdef AMD_BOOTLOC_BUG
139/* Wheee. Bring me the head of someone at AMD. */
140static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
141{
142 struct map_info *map = mtd->priv;
143 struct cfi_private *cfi = map->fldrv_priv;
144 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
145 __u8 major = extp->MajorVersion;
146 __u8 minor = extp->MinorVersion;
147
148 if (((major << 8) | minor) < 0x3131) {
149 /* CFI version 1.0 => don't trust bootloc */
150 if (cfi->id & 0x80) {
151 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
152 extp->TopBottom = 3; /* top boot */
153 } else {
154 extp->TopBottom = 2; /* bottom boot */
155 }
156 }
157}
158#endif
159
160static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
161{
162 struct map_info *map = mtd->priv;
163 struct cfi_private *cfi = map->fldrv_priv;
164 if (cfi->cfiq->BufWriteTimeoutTyp) {
165 DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
166 mtd->write = cfi_amdstd_write_buffers;
167 }
168}
169
170/* Atmel chips don't use the same PRI format as AMD chips */
171static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
172{
173 struct map_info *map = mtd->priv;
174 struct cfi_private *cfi = map->fldrv_priv;
175 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
176 struct cfi_pri_atmel atmel_pri;
177
178 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
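 /* The first 5 bytes of the extended table (the "PRI" signature plus
  * the major/minor version bytes) are left intact; everything after
  * them is cleared before being rebuilt from the Atmel layout. */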
179 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
180
181 if (atmel_pri.Features & 0x02)
182 extp->EraseSuspend = 2;
183
184 if (atmel_pri.BottomBoot)
185 extp->TopBottom = 2;
186 else
187 extp->TopBottom = 3;
188}
189
190static void fixup_use_secsi(struct mtd_info *mtd, void *param)
191{
192 /* Setup for chips with a secsi area */
193 mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
194 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
195}
196
197static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
198{
199 struct map_info *map = mtd->priv;
200 struct cfi_private *cfi = map->fldrv_priv;
201 if ((cfi->cfiq->NumEraseRegions == 1) &&
202 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
203 mtd->erase = cfi_amdstd_erase_chip;
204 }
205
206}
207
208/*
209 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
210 * locked by default.
211 */
212static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
213{
214 mtd->lock = cfi_atmel_lock;
215 mtd->unlock = cfi_atmel_unlock;
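 /* MTD_STUPID_LOCK flags the device as powering up with all sectors
  * locked, so users know an explicit unlock is needed before the
  * first write or erase. */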
216 mtd->flags |= MTD_STUPID_LOCK;
217}
218
219static struct cfi_fixup cfi_fixup_table[] = {
220#ifdef AMD_BOOTLOC_BUG
221 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
222#endif
223 { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
224 { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
225 { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
226 { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
227 { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
228 { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
229#if !FORCE_WORD_WRITE
230 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
231#endif
232 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
233 { 0, 0, NULL, NULL }
234};
235static struct cfi_fixup jedec_fixup_table[] = {
236 { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
237 { MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
238 { MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
239 { 0, 0, NULL, NULL }
240};
241
242static struct cfi_fixup fixup_table[] = {
243 /* The CFI vendor IDs and the JEDEC vendor IDs appear
244 * to be common. It is likely that the device IDs are as
245 * well. This table is to pick up all cases where
246 * we know that is the case.
247 */
248 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
249 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
250 { 0, 0, NULL, NULL }
251};
252
253
254struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
255{
256 struct cfi_private *cfi = map->fldrv_priv;
257 struct mtd_info *mtd;
258 int i;
259
260 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
261 if (!mtd) {
262 printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
263 return NULL;
264 }
265 mtd->priv = map;
266 mtd->type = MTD_NORFLASH;
267
268 /* Fill in the default mtd operations */
269 mtd->erase = cfi_amdstd_erase_varsize;
270 mtd->write = cfi_amdstd_write_words;
271 mtd->read = cfi_amdstd_read;
272 mtd->sync = cfi_amdstd_sync;
273 mtd->suspend = cfi_amdstd_suspend;
274 mtd->resume = cfi_amdstd_resume;
275 mtd->flags = MTD_CAP_NORFLASH;
276 mtd->name = map->name;
277 mtd->writesize = 1;
278
279 if (cfi->cfi_mode==CFI_MODE_CFI){
280 unsigned char bootloc;
281 /*
282 * It's a real CFI chip, not one for which the probe
283 * routine faked a CFI structure. So we read the feature
284 * table from it.
285 */
286 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
287 struct cfi_pri_amdstd *extp;
288
289 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
290 if (!extp) {
291 kfree(mtd);
292 return NULL;
293 }
294
295 if (extp->MajorVersion != '1' ||
296 (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
297 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
298 "version %c.%c.\n", extp->MajorVersion,
299 extp->MinorVersion);
300 kfree(extp);
301 kfree(mtd);
302 return NULL;
303 }
304
305 /* Install our own private info structure */
306 cfi->cmdset_priv = extp;
307
308 /* Apply cfi device specific fixups */
309 cfi_fixup(mtd, cfi_fixup_table);
310
311#ifdef DEBUG_CFI_FEATURES
312 /* Tell the user about it in lots of lovely detail */
313 cfi_tell_features(extp);
314#endif
315
316 bootloc = extp->TopBottom;
317 if ((bootloc != 2) && (bootloc != 3)) {
318 printk(KERN_WARNING "%s: CFI does not contain boot "
319 "bank location. Assuming top.\n", map->name);
320 bootloc = 2;
321 }
322
323 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
324 printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
325
326 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
327 int j = (cfi->cfiq->NumEraseRegions-1)-i;
328 __u32 swap;
329
330 swap = cfi->cfiq->EraseRegionInfo[i];
331 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
332 cfi->cfiq->EraseRegionInfo[j] = swap;
333 }
334 }
335 /* Set the default CFI lock/unlock addresses */
336 cfi->addr_unlock1 = 0x555;
337 cfi->addr_unlock2 = 0x2aa;
338 /* Modify the unlock address if we are in compatibility mode */
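 /* In x16-in-x8 (or x32-in-x16) compatibility mode the device decodes
  * byte addresses, so the standard 0x555/0x2AA unlock cycles must be
  * issued at the shifted 0xaaa/0x555 locations instead. */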
339 if ( /* x16 in x8 mode */
340 ((cfi->device_type == CFI_DEVICETYPE_X8) &&
341 (cfi->cfiq->InterfaceDesc == 2)) ||
342 /* x32 in x16 mode */
343 ((cfi->device_type == CFI_DEVICETYPE_X16) &&
344 (cfi->cfiq->InterfaceDesc == 4)))
345 {
346 cfi->addr_unlock1 = 0xaaa;
347 cfi->addr_unlock2 = 0x555;
348 }
349
350 } /* CFI mode */
351 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
352 /* Apply jedec specific fixups */
353 cfi_fixup(mtd, jedec_fixup_table);
354 }
355 /* Apply generic fixups */
356 cfi_fixup(mtd, fixup_table);
357
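 /* The CFI query reports typical program/erase timeouts as log2 values
  * (microseconds for word/buffer writes, milliseconds for block erase),
  * hence the shifts below. */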
358 for (i=0; i< cfi->numchips; i++) {
359 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
360 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
361 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
362 }
363
364 map->fldrv = &cfi_amdstd_chipdrv;
365
366 return cfi_amdstd_setup(mtd);
367}
368EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
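/*
 * Example usage (not compiled): a board map driver normally reaches this
 * command-set handler indirectly, via do_map_probe("cfi_probe", ...).
 * Everything prefixed "example_"/"EXAMPLE_" below is hypothetical and
 * only sketches the calling convention.
 */
#if 0
static struct map_info example_map = {
    .name      = "example-nor",
    .size      = EXAMPLE_FLASH_SIZE,    /* hypothetical constant */
    .phys      = EXAMPLE_FLASH_BASE,    /* hypothetical constant */
    .bankwidth = 2,                     /* one x16 chip */
};

static int __init example_nor_init(void)
{
    struct mtd_info *mtd;

    example_map.virt = ioremap(example_map.phys, example_map.size);
    if (!example_map.virt)
        return -EIO;
    simple_map_init(&example_map);

    /* cfi_probe identifies the chip and, for command set 0x0002,
     * hands the map to cfi_cmdset_0002() above. */
    mtd = do_map_probe("cfi_probe", &example_map);
    if (!mtd) {
        iounmap(example_map.virt);
        return -ENXIO;
    }
    mtd->owner = THIS_MODULE;
    return add_mtd_device(mtd);
}
#endif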
369
370static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
371{
372 struct map_info *map = mtd->priv;
373 struct cfi_private *cfi = map->fldrv_priv;
374 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
375 unsigned long offset = 0;
376 int i,j;
377
378 printk(KERN_NOTICE "number of %s chips: %d\n",
379 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
380 /* Select the correct geometry setup */
381 mtd->size = devsize * cfi->numchips;
382
383 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
384 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
385 * mtd->numeraseregions, GFP_KERNEL);
386 if (!mtd->eraseregions) {
387 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
388 goto setup_err;
389 }
390
391 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
392 unsigned long ernum, ersize;
393 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
394 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
395
396 if (mtd->erasesize < ersize) {
397 mtd->erasesize = ersize;
398 }
399 for (j=0; j<cfi->numchips; j++) {
400 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
401 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
402 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
403 }
404 offset += (ersize * ernum);
405 }
406 if (offset != devsize) {
407 /* Argh */
408 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
409 goto setup_err;
410 }
411#if 0
412 // debug
413 for (i=0; i<mtd->numeraseregions;i++){
414 printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
415 i,mtd->eraseregions[i].offset,
416 mtd->eraseregions[i].erasesize,
417 mtd->eraseregions[i].numblocks);
418 }
419#endif
420
421 /* FIXME: erase-suspend-program is broken. See
422 http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
423 printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
424
425 __module_get(THIS_MODULE);
426 return mtd;
427
428 setup_err:
429 if(mtd) {
430 kfree(mtd->eraseregions);
431 kfree(mtd);
432 }
433 kfree(cfi->cmdset_priv);
434 kfree(cfi->cfiq);
435 return NULL;
436}
437
438/*
439 * Return true if the chip is ready.
440 *
441 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
442 * non-suspended sector) and is indicated by no toggle bits toggling.
443 *
444 * Note that anything more complicated than checking if no bits are toggling
445 * (including checking DQ5 for an error status) is tricky to get working
446 * correctly and is therefore not done (particularly with interleaved chips
447 * as each chip must be checked independently of the others).
448 */
449static int __xipram chip_ready(struct map_info *map, unsigned long addr)
450{
451 map_word d, t;
452
453 d = map_read(map, addr);
454 t = map_read(map, addr);
455
456 return map_word_equal(map, d, t);
457}
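/*
 * While an embedded program/erase algorithm is running, AMD-style chips
 * toggle DQ6 (and DQ2) on every read, so two consecutive reads of the
 * same address differ until the operation completes; equal reads
 * therefore mean "ready".
 */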
458
459/*
460 * Return true if the chip is ready and has the correct value.
461 *
462 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
463 * non-suspended sector) and it is indicated by no bits toggling.
464 *
465 * Errors are indicated by toggling bits, or by bits being held with the
466 * wrong value.
467 *
468 * Note that anything more complicated than checking if no bits are toggling
469 * (including checking DQ5 for an error status) is tricky to get working
470 * correctly and is therefore not done (particularly with interleaved chips
471 * as each chip must be checked independently of the others).
472 *
473 */
474static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
475{
476 map_word oldd, curd;
477
478 oldd = map_read(map, addr);
479 curd = map_read(map, addr);
480
481 return map_word_equal(map, oldd, curd) &&
482 map_word_equal(map, curd, expected);
483}
484
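/*
 * get_chip() claims a chip for the given operation mode.  It waits for
 * any pending operation to finish and, where the chip advertises
 * support, suspends an in-progress erase so that a read can proceed
 * (erase-suspend-program is deliberately not used; see the FIXME
 * below).  Call with chip->mutex held; it may drop and retake the lock.
 */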
485static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
486{
487 DECLARE_WAITQUEUE(wait, current);
488 struct cfi_private *cfi = map->fldrv_priv;
489 unsigned long timeo;
490 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
491
492 resettime:
493 timeo = jiffies + HZ;
494 retry:
495 switch (chip->state) {
496
497 case FL_STATUS:
498 for (;;) {
499 if (chip_ready(map, adr))
500 break;
501
502 if (time_after(jiffies, timeo)) {
503 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
504 spin_unlock(chip->mutex);
505 return -EIO;
506 }
507 spin_unlock(chip->mutex);
508 cfi_udelay(1);
509 spin_lock(chip->mutex);
510 /* Someone else might have been playing with it. */
511 goto retry;
512 }
513
514 case FL_READY:
515 case FL_CFI_QUERY:
516 case FL_JEDEC_QUERY:
517 return 0;
518
519 case FL_ERASING:
520 if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
521 goto sleep;
522
523 if (!( mode == FL_READY
524 || mode == FL_POINT
525 || !cfip
526 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
527 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)
528 )))
529 goto sleep;
530
531 /* We could check to see if we're trying to access the sector
532 * that is currently being erased. However, no user will try
533 * anything like that so we just wait for the timeout. */
534
535 /* Erase suspend */
536 /* It's harmless to issue the Erase-Suspend and Erase-Resume
537 * commands when the erase algorithm isn't in progress. */
538 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
539 chip->oldstate = FL_ERASING;
540 chip->state = FL_ERASE_SUSPENDING;
541 chip->erase_suspended = 1;
542 for (;;) {
543 if (chip_ready(map, adr))
544 break;
545
546 if (time_after(jiffies, timeo)) {
547 /* Should have suspended the erase by now.
548 * Send an Erase-Resume command as either
549 * there was an error (so leave the erase
550 * routine to recover from it) or we're trying to
551 * use the erase-in-progress sector. */
552 map_write(map, CMD(0x30), chip->in_progress_block_addr);
553 chip->state = FL_ERASING;
554 chip->oldstate = FL_READY;
555 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
556 return -EIO;
557 }
558
559 spin_unlock(chip->mutex);
560 cfi_udelay(1);
561 spin_lock(chip->mutex);
562 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
563 So we can just loop here. */
564 }
565 chip->state = FL_READY;
566 return 0;
567
568 case FL_XIP_WHILE_ERASING:
569 if (mode != FL_READY && mode != FL_POINT &&
570 (!cfip || !(cfip->EraseSuspend&2)))
571 goto sleep;
572 chip->oldstate = chip->state;
573 chip->state = FL_READY;
574 return 0;
575
576 case FL_POINT:
577 /* Only if there's no operation suspended... */
578 if (mode == FL_READY && chip->oldstate == FL_READY)
579 return 0;
580
581 default:
582 sleep:
583 set_current_state(TASK_UNINTERRUPTIBLE);
584 add_wait_queue(&chip->wq, &wait);
585 spin_unlock(chip->mutex);
586 schedule();
587 remove_wait_queue(&chip->wq, &wait);
588 spin_lock(chip->mutex);
589 goto resettime;
590 }
591}
592
593
594static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
595{
596 struct cfi_private *cfi = map->fldrv_priv;
597
598 switch(chip->oldstate) {
599 case FL_ERASING:
600 chip->state = chip->oldstate;
601 map_write(map, CMD(0x30), chip->in_progress_block_addr);
602 chip->oldstate = FL_READY;
603 chip->state = FL_ERASING;
604 break;
605
606 case FL_XIP_WHILE_ERASING:
607 chip->state = chip->oldstate;
608 chip->oldstate = FL_READY;
609 break;
610
611 case FL_READY:
612 case FL_STATUS:
613 /* We should really make set_vpp() count, rather than doing this */
614 DISABLE_VPP(map);
615 break;
616 default:
617 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
618 }
619 wake_up(&chip->wq);
620}
621
622#ifdef CONFIG_MTD_XIP
623
624/*
625 * No interrupt whatsoever can be serviced while the flash isn't in array
626 * mode. This is ensured by the xip_disable() and xip_enable() functions
627 * enclosing any code path where the flash is known not to be in array mode.
628 * And within a XIP disabled code path, only functions marked with __xipram
629 * may be called and nothing else (it's a good thing to inspect generated
630 * assembly to make sure inline functions were actually inlined and that gcc
631 * didn't emit calls to its own support functions). Configuring the MTD CFI
632 * support for a single buswidth and a single interleave is also recommended.
633 */
634
635static void xip_disable(struct map_info *map, struct flchip *chip,
636 unsigned long adr)
637{
638 /* TODO: chips with no XIP use should ignore and return */
639 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
640 local_irq_disable();
641}
642
643static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
644 unsigned long adr)
645{
646 struct cfi_private *cfi = map->fldrv_priv;
647
648 if (chip->state != FL_POINT && chip->state != FL_READY) {
649 map_write(map, CMD(0xf0), adr);
650 chip->state = FL_READY;
651 }
652 (void) map_read(map, adr);
653 xip_iprefetch();
654 local_irq_enable();
655}
656
657/*
658 * When a delay is required for the flash operation to complete, the
659 * xip_udelay() function is polling for both the given timeout and pending
660 * (but still masked) hardware interrupts. Whenever an interrupt is
661 * pending, the flash erase operation is suspended, array mode restored
662 * and interrupts unmasked. Task scheduling might also happen at that
663 * point. The CPU eventually returns from the interrupt or the call to
664 * schedule() and the suspended flash operation is resumed for the remainder
665 * of the delay period.
666 *
667 * Warning: this function _will_ fool interrupt latency tracing tools.
668 */
669
670static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
671 unsigned long adr, int usec)
672{
673 struct cfi_private *cfi = map->fldrv_priv;
674 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
675 map_word status, OK = CMD(0x80);
676 unsigned long suspended, start = xip_currtime();
677 flstate_t oldstate;
678
679 do {
680 cpu_relax();
681 if (xip_irqpending() && extp &&
682 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
683 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
684 /*
685 * Let's suspend the erase operation when supported.
686 * Note that we currently don't try to suspend
687 * interleaved chips if there is already another
688 * operation suspended (imagine what happens
689 * when one chip was already done with the current
690 * operation while another chip suspended it, then
691 * we resume the whole thing at once). Yes, it
692 * can happen!
693 */
694 map_write(map, CMD(0xb0), adr);
695 usec -= xip_elapsed_since(start);
696 suspended = xip_currtime();
697 do {
698 if (xip_elapsed_since(suspended) > 100000) {
699 /*
700 * The chip doesn't want to suspend
701 * after waiting for 100 msecs.
702 * This is a critical error but there
703 * is not much we can do here.
704 */
705 return;
706 }
707 status = map_read(map, adr);
708 } while (!map_word_andequal(map, status, OK, OK));
709
710 /* Suspend succeeded */
711 oldstate = chip->state;
712 if (!map_word_bitsset(map, status, CMD(0x40)))
713 break;
714 chip->state = FL_XIP_WHILE_ERASING;
715 chip->erase_suspended = 1;
716 map_write(map, CMD(0xf0), adr);
717 (void) map_read(map, adr);
718 asm volatile (".rep 8; nop; .endr");
719 local_irq_enable();
720 spin_unlock(chip->mutex);
721 asm volatile (".rep 8; nop; .endr");
722 cond_resched();
723
724 /*
725 * We're back. However someone else might have
726 * decided to go write to the chip if we are in
727 * a suspended erase state. If so let's wait
728 * until it's done.
729 */
730 spin_lock(chip->mutex);
731 while (chip->state != FL_XIP_WHILE_ERASING) {
732 DECLARE_WAITQUEUE(wait, current);
733 set_current_state(TASK_UNINTERRUPTIBLE);
734 add_wait_queue(&chip->wq, &wait);
735 spin_unlock(chip->mutex);
736 schedule();
737 remove_wait_queue(&chip->wq, &wait);
738 spin_lock(chip->mutex);
739 }
740 /* Disallow XIP again */
741 local_irq_disable();
742
743 /* Resume the write or erase operation */
744 map_write(map, CMD(0x30), adr);
745 chip->state = oldstate;
746 start = xip_currtime();
747 } else if (usec >= 1000000/HZ) {
748 /*
749 * Try to save on CPU power when waiting delay
750 * is at least a system timer tick period.
751 * No need to be extremely accurate here.
752 */
753 xip_cpu_idle();
754 }
755 status = map_read(map, adr);
756 } while (!map_word_andequal(map, status, OK, OK)
757 && xip_elapsed_since(start) < usec);
758}
759
760#define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
761
762/*
763 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
764 * the flash is actively programming or erasing since we have to poll for
765 * the operation to complete anyway. We can't do that in a generic way with
766 * a XIP setup so do it before the actual flash operation in this case
767 * and stub it out from INVALIDATE_CACHE_UDELAY.
768 */
769#define XIP_INVAL_CACHED_RANGE(map, from, size) \
770 INVALIDATE_CACHED_RANGE(map, from, size)
771
772#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
773 UDELAY(map, chip, adr, usec)
774
775/*
776 * Extra notes:
777 *
778 * Activating this XIP support changes the way the code works a bit. For
779 * example the code to suspend the current process when concurrent access
780 * happens is never executed because xip_udelay() will always return with the
781 * same chip state as it was entered with. This is why there is no care for
782 * the presence of add_wait_queue() or schedule() calls from within a couple of
783 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
784 * The queueing and scheduling are always happening within xip_udelay().
785 *
786 * Similarly, get_chip() and put_chip() just happen to always be executed
787 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. while the flash
788 * is in array mode, therefore never executing many cases therein and never
789 * causing any problem with XIP.
790 */
791
792#else
793
794#define xip_disable(map, chip, adr)
795#define xip_enable(map, chip, adr)
796#define XIP_INVAL_CACHED_RANGE(x...)
797
798#define UDELAY(map, chip, adr, usec) \
799do { \
800 spin_unlock(chip->mutex); \
801 cfi_udelay(usec); \
802 spin_lock(chip->mutex); \
803} while (0)
804
805#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
806do { \
807 spin_unlock(chip->mutex); \
808 INVALIDATE_CACHED_RANGE(map, adr, len); \
809 cfi_udelay(usec); \
810 spin_lock(chip->mutex); \
811} while (0)
812
813#endif
814
815static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
816{
817 unsigned long cmd_addr;
818 struct cfi_private *cfi = map->fldrv_priv;
819 int ret;
820
821 adr += chip->start;
822
823 /* Ensure cmd read/writes are aligned. */
824 cmd_addr = adr & ~(map_bankwidth(map)-1);
825
826 spin_lock(chip->mutex);
827 ret = get_chip(map, chip, cmd_addr, FL_READY);
828 if (ret) {
829 spin_unlock(chip->mutex);
830 return ret;
831 }
832
833 if (chip->state != FL_POINT && chip->state != FL_READY) {
834 map_write(map, CMD(0xf0), cmd_addr);
835 chip->state = FL_READY;
836 }
837
838 map_copy_from(map, buf, adr, len);
839
840 put_chip(map, chip, cmd_addr);
841
842 spin_unlock(chip->mutex);
843 return 0;
844}
845
846
847static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
848{
849 struct map_info *map = mtd->priv;
850 struct cfi_private *cfi = map->fldrv_priv;
851 unsigned long ofs;
852 int chipnum;
853 int ret = 0;
854
855 /* ofs: offset within the first chip that the first read should start */
856
857 chipnum = (from >> cfi->chipshift);
858 ofs = from - (chipnum << cfi->chipshift);
859
860
861 *retlen = 0;
862
863 while (len) {
864 unsigned long thislen;
865
866 if (chipnum >= cfi->numchips)
867 break;
868
869 if ((len + ofs -1) >> cfi->chipshift)
870 thislen = (1<<cfi->chipshift) - ofs;
871 else
872 thislen = len;
873
874 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
875 if (ret)
876 break;
877
878 *retlen += thislen;
879 len -= thislen;
880 buf += thislen;
881
882 ofs = 0;
883 chipnum++;
884 }
885 return ret;
886}
887
888
889static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
890{
891 DECLARE_WAITQUEUE(wait, current);
892 unsigned long timeo = jiffies + HZ;
893 struct cfi_private *cfi = map->fldrv_priv;
894
895 retry:
896 spin_lock(chip->mutex);
897
898 if (chip->state != FL_READY){
899#if 0
900 printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
901#endif
902 set_current_state(TASK_UNINTERRUPTIBLE);
903 add_wait_queue(&chip->wq, &wait);
904
905 spin_unlock(chip->mutex);
906
907 schedule();
908 remove_wait_queue(&chip->wq, &wait);
909#if 0
910 if(signal_pending(current))
911 return -EINTR;
912#endif
913 timeo = jiffies + HZ;
914
915 goto retry;
916 }
917
918 adr += chip->start;
919
920 chip->state = FL_READY;
921
922 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
923 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
924 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
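 /* The 0x88 command switches the chip into its SecSi (security silicon)
  * sector, so the copy below reads that region instead of the normal
  * array; the 0x90/0x00 cycle afterwards exits SecSi mode again. */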
925
926 map_copy_from(map, buf, adr, len);
927
928 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
929 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
930 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
931 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
932
933 wake_up(&chip->wq);
934 spin_unlock(chip->mutex);
935
936 return 0;
937}
938
939static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
940{
941 struct map_info *map = mtd->priv;
942 struct cfi_private *cfi = map->fldrv_priv;
943 unsigned long ofs;
944 int chipnum;
945 int ret = 0;
946
947
948 /* ofs: offset within the first chip that the first read should start */
949
950 /* 8 secsi bytes per chip */
951 chipnum=from>>3;
952 ofs=from & 7;
953
954
955 *retlen = 0;
956
957 while (len) {
958 unsigned long thislen;
959
960 if (chipnum >= cfi->numchips)
961 break;
962
963 if ((len + ofs -1) >> 3)
964 thislen = (1<<3) - ofs;
965 else
966 thislen = len;
967
968 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
969 if (ret)
970 break;
971
972 *retlen += thislen;
973 len -= thislen;
974 buf += thislen;
975
976 ofs = 0;
977 chipnum++;
978 }
979 return ret;
980}
981
982
983static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
984{
985 struct cfi_private *cfi = map->fldrv_priv;
986 unsigned long timeo = jiffies + HZ;
987 /*
988 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
989 * have a max write time of a few hundred usecs). However, we should
990 * use the maximum timeout value given by the chip at probe time
991 * instead. Unfortunately, struct flchip does not have a field for the
992 * maximum timeout, only for the typical one, which can be far too short
993 * depending on the conditions. The ' + 1' is to avoid having a
994 * timeout of 0 jiffies if HZ is smaller than 1000.
995 */
996 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
997 int ret = 0;
998 map_word oldd;
999 int retry_cnt = 0;
1000
1001 adr += chip->start;
1002
1003 spin_lock(chip->mutex);
1004 ret = get_chip(map, chip, adr, FL_WRITING);
1005 if (ret) {
1006 spin_unlock(chip->mutex);
1007 return ret;
1008 }
1009
1010 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1011 __func__, adr, datum.x[0] );
1012
1013 /*
1014 * Check for a NOP for the case when the datum to write is already
1015 * present - it saves time and works around buggy chips that corrupt
1016 * data at other locations when 0xff is written to a location that
1017 * already contains 0xff.
1018 */
1019 oldd = map_read(map, adr);
1020 if (map_word_equal(map, oldd, datum)) {
1021 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
1022 __func__);
1023 goto op_done;
1024 }
1025
1026 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1027 ENABLE_VPP(map);
1028 xip_disable(map, chip, adr);
1029 retry:
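 /* Standard AMD program sequence: two unlock cycles (0xAA/0x55 at the
  * unlock addresses), the 0xA0 program command, then the datum written
  * straight to the target address. */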
1030 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1031 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1032 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1033 map_write(map, datum, adr);
1034 chip->state = FL_WRITING;
1035
1036 INVALIDATE_CACHE_UDELAY(map, chip,
1037 adr, map_bankwidth(map),
1038 chip->word_write_time);
1039
1040 /* See comment above for timeout value. */
1041 timeo = jiffies + uWriteTimeout;
1042 for (;;) {
1043 if (chip->state != FL_WRITING) {
1044 /* Someone's suspended the write. Sleep */
1045 DECLARE_WAITQUEUE(wait, current);
1046
1047 set_current_state(TASK_UNINTERRUPTIBLE);
1048 add_wait_queue(&chip->wq, &wait);
1049 spin_unlock(chip->mutex);
1050 schedule();
1051 remove_wait_queue(&chip->wq, &wait);
1052 timeo = jiffies + (HZ / 2); /* FIXME */
1053 spin_lock(chip->mutex);
1054 continue;
1055 }
1056
1057 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
1058 xip_enable(map, chip, adr);
1059 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1060 xip_disable(map, chip, adr);
1061 break;
1062 }
1063
1064 if (chip_ready(map, adr))
1065 break;
1066
1067 /* Latency issues. Drop the lock, wait a while and retry */
1068 UDELAY(map, chip, adr, 1);
1069 }
1070 /* Did we succeed? */
1071 if (!chip_good(map, adr, datum)) {
1072 /* reset on all failures. */
1073 map_write( map, CMD(0xF0), chip->start );
1074 /* FIXME - should have reset delay before continuing */
1075
1076 if (++retry_cnt <= MAX_WORD_RETRIES)
1077 goto retry;
1078
1079 ret = -EIO;
1080 }
1081 xip_enable(map, chip, adr);
1082 op_done:
1083 chip->state = FL_READY;
1084 put_chip(map, chip, adr);
1085 spin_unlock(chip->mutex);
1086
1087 return ret;
1088}
1089
1090
1091static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1092 size_t *retlen, const u_char *buf)
1093{
1094 struct map_info *map = mtd->priv;
1095 struct cfi_private *cfi = map->fldrv_priv;
1096 int ret = 0;
1097 int chipnum;
1098 unsigned long ofs, chipstart;
1099 DECLARE_WAITQUEUE(wait, current);
1100
1101 *retlen = 0;
1102 if (!len)
1103 return 0;
1104
1105 chipnum = to >> cfi->chipshift;
1106 ofs = to - (chipnum << cfi->chipshift);
1107 chipstart = cfi->chips[chipnum].start;
1108
1109 /* If it's not bus-aligned, do the first byte write */
1110 if (ofs & (map_bankwidth(map)-1)) {
1111 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1112 int i = ofs - bus_ofs;
1113 int n = 0;
1114 map_word tmp_buf;
1115
1116 retry:
1117 spin_lock(cfi->chips[chipnum].mutex);
1118
1119 if (cfi->chips[chipnum].state != FL_READY) {
1120#if 0
1121 printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
1122#endif
1123 set_current_state(TASK_UNINTERRUPTIBLE);
1124 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1125
1126 spin_unlock(cfi->chips[chipnum].mutex);
1127
1128 schedule();
1129 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1130#if 0
1131 if(signal_pending(current))
1132 return -EINTR;
1133#endif
1134 goto retry;
1135 }
1136
1137 /* Load 'tmp_buf' with old contents of flash */
1138 tmp_buf = map_read(map, bus_ofs+chipstart);
1139
1140 spin_unlock(cfi->chips[chipnum].mutex);
1141
1142 /* Number of bytes to copy from buffer */
1143 n = min_t(int, len, map_bankwidth(map)-i);
1144
1145 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1146
1147 ret = do_write_oneword(map, &cfi->chips[chipnum],
1148 bus_ofs, tmp_buf);
1149 if (ret)
1150 return ret;
1151
1152 ofs += n;
1153 buf += n;
1154 (*retlen) += n;
1155 len -= n;
1156
1157 if (ofs >> cfi->chipshift) {
1158 chipnum ++;
1159 ofs = 0;
1160 if (chipnum == cfi->numchips)
1161 return 0;
1162 }
1163 }
1164
1165 /* We are now aligned, write as much as possible */
1166 while(len >= map_bankwidth(map)) {
1167 map_word datum;
1168
1169 datum = map_word_load(map, buf);
1170
1171 ret = do_write_oneword(map, &cfi->chips[chipnum],
1172 ofs, datum);
1173 if (ret)
1174 return ret;
1175
1176 ofs += map_bankwidth(map);
1177 buf += map_bankwidth(map);
1178 (*retlen) += map_bankwidth(map);
1179 len -= map_bankwidth(map);
1180
1181 if (ofs >> cfi->chipshift) {
1182 chipnum ++;
1183 ofs = 0;
1184 if (chipnum == cfi->numchips)
1185 return 0;
1186 chipstart = cfi->chips[chipnum].start;
1187 }
1188 }
1189
1190 /* Write the trailing bytes if any */
1191 if (len & (map_bankwidth(map)-1)) {
1192 map_word tmp_buf;
1193
1194 retry1:
1195 spin_lock(cfi->chips[chipnum].mutex);
1196
1197 if (cfi->chips[chipnum].state != FL_READY) {
1198#if 0
1199 printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
1200#endif
1201 set_current_state(TASK_UNINTERRUPTIBLE);
1202 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1203
1204 spin_unlock(cfi->chips[chipnum].mutex);
1205
1206 schedule();
1207 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1208#if 0
1209 if(signal_pending(current))
1210 return -EINTR;
1211#endif
1212 goto retry1;
1213 }
1214
1215 tmp_buf = map_read(map, ofs + chipstart);
1216
1217 spin_unlock(cfi->chips[chipnum].mutex);
1218
1219 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1220
1221 ret = do_write_oneword(map, &cfi->chips[chipnum],
1222 ofs, tmp_buf);
1223 if (ret)
1224 return ret;
1225
1226 (*retlen) += len;
1227 }
1228
1229 return 0;
1230}
1231
1232
1233/*
1234 * FIXME: interleaved mode not tested, and probably not supported!
1235 */
1236static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1237 unsigned long adr, const u_char *buf,
1238 int len)
1239{
1240 struct cfi_private *cfi = map->fldrv_priv;
1241 unsigned long timeo = jiffies + HZ;
1242 /* see comments in do_write_oneword() regarding uWriteTimeout. */
1243 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1244 int ret = -EIO;
1245 unsigned long cmd_adr;
1246 int z, words;
1247 map_word datum;
1248
1249 adr += chip->start;
1250 cmd_adr = adr;
1251
1252 spin_lock(chip->mutex);
1253 ret = get_chip(map, chip, adr, FL_WRITING);
1254 if (ret) {
1255 spin_unlock(chip->mutex);
1256 return ret;
1257 }
1258
1259 datum = map_word_load(map, buf);
1260
1261 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1262 __func__, adr, datum.x[0] );
1263
1264 XIP_INVAL_CACHED_RANGE(map, adr, len);
1265 ENABLE_VPP(map);
1266 xip_disable(map, chip, cmd_adr);
1267
1268 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1269 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1270 //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1271
1272 /* Write Buffer Load */
1273 map_write(map, CMD(0x25), cmd_adr);
1274
1275 chip->state = FL_WRITING_TO_BUFFER;
1276
1277 /* Write length of data to come */
1278 words = len / map_bankwidth(map);
1279 map_write(map, CMD(words - 1), cmd_adr);
1280 /* Write data */
1281 z = 0;
1282 while(z < words * map_bankwidth(map)) {
1283 datum = map_word_load(map, buf);
1284 map_write(map, datum, adr + z);
1285
1286 z += map_bankwidth(map);
1287 buf += map_bankwidth(map);
1288 }
1289 z -= map_bankwidth(map);
1290
1291 adr += z;
1292
1293 /* Write Buffer Program Confirm: GO GO GO */
1294 map_write(map, CMD(0x29), cmd_adr);
1295 chip->state = FL_WRITING;
1296
1297 INVALIDATE_CACHE_UDELAY(map, chip,
1298 adr, map_bankwidth(map),
1299 chip->word_write_time);
1300
1301 timeo = jiffies + uWriteTimeout;
1302
1303 for (;;) {
1304 if (chip->state != FL_WRITING) {
1305 /* Someone's suspended the write. Sleep */
1306 DECLARE_WAITQUEUE(wait, current);
1307
1308 set_current_state(TASK_UNINTERRUPTIBLE);
1309 add_wait_queue(&chip->wq, &wait);
1310 spin_unlock(chip->mutex);
1311 schedule();
1312 remove_wait_queue(&chip->wq, &wait);
1313 timeo = jiffies + (HZ / 2); /* FIXME */
1314 spin_lock(chip->mutex);
1315 continue;
1316 }
1317
1318 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1319 break;
1320
1321 if (chip_ready(map, adr)) {
1322 xip_enable(map, chip, adr);
1323 goto op_done;
1324 }
1325
1326 /* Latency issues. Drop the lock, wait a while and retry */
1327 UDELAY(map, chip, adr, 1);
1328 }
1329
1330 /* reset on all failures. */
1331 map_write( map, CMD(0xF0), chip->start );
1332 xip_enable(map, chip, adr);
1333 /* FIXME - should have reset delay before continuing */
1334
1335 printk(KERN_WARNING "MTD %s(): software timeout\n",
1336 __func__ );
1337
1338 ret = -EIO;
1339 op_done:
1340 chip->state = FL_READY;
1341 put_chip(map, chip, adr);
1342 spin_unlock(chip->mutex);
1343
1344 return ret;
1345}
1346
1347
1348static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1349 size_t *retlen, const u_char *buf)
1350{
1351 struct map_info *map = mtd->priv;
1352 struct cfi_private *cfi = map->fldrv_priv;
1353 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
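 /* MaxBufWriteSize is a log2 byte count from the CFI query; scaling by
  * the interleave gives the write-buffer size as seen through this map. */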
1354 int ret = 0;
1355 int chipnum;
1356 unsigned long ofs;
1357
1358 *retlen = 0;
1359 if (!len)
1360 return 0;
1361
1362 chipnum = to >> cfi->chipshift;
1363 ofs = to - (chipnum << cfi->chipshift);
1364
1365 /* If it's not bus-aligned, do the first word write */
1366 if (ofs & (map_bankwidth(map)-1)) {
1367 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1368 if (local_len > len)
1369 local_len = len;
1370 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1371 local_len, retlen, buf);
1372 if (ret)
1373 return ret;
1374 ofs += local_len;
1375 buf += local_len;
1376 len -= local_len;
1377
1378 if (ofs >> cfi->chipshift) {
1379 chipnum ++;
1380 ofs = 0;
1381 if (chipnum == cfi->numchips)
1382 return 0;
1383 }
1384 }
1385
1386 /* Write buffer is worth it only if more than one word to write... */
1387 while (len >= map_bankwidth(map) * 2) {
1388 /* We must not cross write block boundaries */
1389 int size = wbufsize - (ofs & (wbufsize-1));
1390
1391 if (size > len)
1392 size = len;
1393 if (size % map_bankwidth(map))
1394 size -= size % map_bankwidth(map);
1395
1396 ret = do_write_buffer(map, &cfi->chips[chipnum],
1397 ofs, buf, size);
1398 if (ret)
1399 return ret;
1400
1401 ofs += size;
1402 buf += size;
1403 (*retlen) += size;
1404 len -= size;
1405
1406 if (ofs >> cfi->chipshift) {
1407 chipnum ++;
1408 ofs = 0;
1409 if (chipnum == cfi->numchips)
1410 return 0;
1411 }
1412 }
1413
1414 if (len) {
1415 size_t retlen_dregs = 0;
1416
1417 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1418 len, &retlen_dregs, buf);
1419
1420 *retlen += retlen_dregs;
1421 return ret;
1422 }
1423
1424 return 0;
1425}
1426
1427
1428/*
1429 * Handle devices with one erase region, that only implement
1430 * the chip erase command.
1431 */
1432static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1433{
1434 struct cfi_private *cfi = map->fldrv_priv;
1435 unsigned long timeo = jiffies + HZ;
1436 unsigned long int adr;
1437 DECLARE_WAITQUEUE(wait, current);
1438 int ret = 0;
1439
1440 adr = cfi->addr_unlock1;
1441
1442 spin_lock(chip->mutex);
1443 ret = get_chip(map, chip, adr, FL_WRITING);
1444 if (ret) {
1445 spin_unlock(chip->mutex);
1446 return ret;
1447 }
1448
1449 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1450 __func__, chip->start );
1451
1452 XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1453 ENABLE_VPP(map);
1454 xip_disable(map, chip, adr);
1455
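 /* Six-cycle chip erase: two unlock cycles, 0x80 (erase setup), two
  * more unlock cycles, then 0x10 (chip erase) at the unlock address. */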
1456 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1457 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1458 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1459 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1460 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1461 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1462
1463 chip->state = FL_ERASING;
1464 chip->erase_suspended = 0;
1465 chip->in_progress_block_addr = adr;
1466
1467 INVALIDATE_CACHE_UDELAY(map, chip,
1468 adr, map->size,
1469 chip->erase_time*500);
1470
1471 timeo = jiffies + (HZ*20);
1472
1473 for (;;) {
1474 if (chip->state != FL_ERASING) {
1475 /* Someone's suspended the erase. Sleep */
1476 set_current_state(TASK_UNINTERRUPTIBLE);
1477 add_wait_queue(&chip->wq, &wait);
1478 spin_unlock(chip->mutex);
1479 schedule();
1480 remove_wait_queue(&chip->wq, &wait);
1481 spin_lock(chip->mutex);
1482 continue;
1483 }
1484 if (chip->erase_suspended) {
1485 /* This erase was suspended and resumed.
1486 Adjust the timeout */
1487 timeo = jiffies + (HZ*20); /* FIXME */
1488 chip->erase_suspended = 0;
1489 }
1490
1491 if (chip_ready(map, adr))
1492 break;
1493
1494 if (time_after(jiffies, timeo)) {
1495 printk(KERN_WARNING "MTD %s(): software timeout\n",
1496 __func__ );
1497 break;
1498 }
1499
1500 /* Latency issues. Drop the lock, wait a while and retry */
1501 UDELAY(map, chip, adr, 1000000/HZ);
1502 }
1503 /* Did we succeed? */
1504 if (!chip_good(map, adr, map_word_ff(map))) {
1505 /* reset on all failures. */
1506 map_write( map, CMD(0xF0), chip->start );
1507 /* FIXME - should have reset delay before continuing */
1508
1509 ret = -EIO;
1510 }
1511
1512 chip->state = FL_READY;
1513 xip_enable(map, chip, adr);
1514 put_chip(map, chip, adr);
1515 spin_unlock(chip->mutex);
1516
1517 return ret;
1518}
1519
1520
1521static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1522{
1523 struct cfi_private *cfi = map->fldrv_priv;
1524 unsigned long timeo = jiffies + HZ;
1525 DECLARE_WAITQUEUE(wait, current);
1526 int ret = 0;
1527
1528 adr += chip->start;
1529
1530 spin_lock(chip->mutex);
1531 ret = get_chip(map, chip, adr, FL_ERASING);
1532 if (ret) {
1533 spin_unlock(chip->mutex);
1534 return ret;
1535 }
1536
1537 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1538 __func__, adr );
1539
1540 XIP_INVAL_CACHED_RANGE(map, adr, len);
1541 ENABLE_VPP(map);
1542 xip_disable(map, chip, adr);
1543
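 /* Six-cycle sector erase: two unlock cycles, 0x80 (erase setup), two
  * more unlock cycles, then 0x30 written to the sector address itself. */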
1544 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1545 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1546 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1547 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1548 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1549 map_write(map, CMD(0x30), adr);
1550
1551 chip->state = FL_ERASING;
1552 chip->erase_suspended = 0;
1553 chip->in_progress_block_addr = adr;
1554
1555 INVALIDATE_CACHE_UDELAY(map, chip,
1556 adr, len,
1557 chip->erase_time*500);
1558
1559 timeo = jiffies + (HZ*20);
1560
1561 for (;;) {
1562 if (chip->state != FL_ERASING) {
1563 /* Someone's suspended the erase. Sleep */
1564 set_current_state(TASK_UNINTERRUPTIBLE);
1565 add_wait_queue(&chip->wq, &wait);
1566 spin_unlock(chip->mutex);
1567 schedule();
1568 remove_wait_queue(&chip->wq, &wait);
1569 spin_lock(chip->mutex);
1570 continue;
1571 }
1572 if (chip->erase_suspended) {
1573 /* This erase was suspended and resumed.
1574 Adjust the timeout */
1575 timeo = jiffies + (HZ*20); /* FIXME */
1576 chip->erase_suspended = 0;
1577 }
1578
1579 if (chip_ready(map, adr)) {
1580 xip_enable(map, chip, adr);
1581 break;
1582 }
1583
1584 if (time_after(jiffies, timeo)) {
1585 xip_enable(map, chip, adr);
1586 printk(KERN_WARNING "MTD %s(): software timeout\n",
1587 __func__ );
1588 break;
1589 }
1590
1591 /* Latency issues. Drop the lock, wait a while and retry */
1592 UDELAY(map, chip, adr, 1000000/HZ);
1593 }
1594 /* Did we succeed? */
1595 if (!chip_good(map, adr, map_word_ff(map))) {
1596 /* reset on all failures. */
1597 map_write( map, CMD(0xF0), chip->start );
1598 /* FIXME - should have reset delay before continuing */
1599
1600 ret = -EIO;
1601 }
1602
1603 chip->state = FL_READY;
1604 put_chip(map, chip, adr);
1605 spin_unlock(chip->mutex);
1606 return ret;
1607}
1608
1609
1610int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1611{
1612 unsigned long ofs, len;
1613 int ret;
1614
1615 ofs = instr->addr;
1616 len = instr->len;
1617
1618 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1619 if (ret)
1620 return ret;
1621
1622 instr->state = MTD_ERASE_DONE;
1623 mtd_erase_callback(instr);
1624
1625 return 0;
1626}
1627
1628
1629static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1630{
1631 struct map_info *map = mtd->priv;
1632 struct cfi_private *cfi = map->fldrv_priv;
1633 int ret = 0;
1634
1635 if (instr->addr != 0)
1636 return -EINVAL;
1637
1638 if (instr->len != mtd->size)
1639 return -EINVAL;
1640
1641 ret = do_erase_chip(map, &cfi->chips[0]);
1642 if (ret)
1643 return ret;
1644
1645 instr->state = MTD_ERASE_DONE;
1646 mtd_erase_callback(instr);
1647
1648 return 0;
1649}
1650
1651static int do_atmel_lock(struct map_info *map, struct flchip *chip,
1652 unsigned long adr, int len, void *thunk)
1653{
1654 struct cfi_private *cfi = map->fldrv_priv;
1655 int ret;
1656
1657 spin_lock(chip->mutex);
1658 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
1659 if (ret)
1660 goto out_unlock;
1661 chip->state = FL_LOCKING;
1662
1663 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
1664 __func__, adr, len);
1665
1666 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1667 cfi->device_type, NULL);
1668 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1669 cfi->device_type, NULL);
1670 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
1671 cfi->device_type, NULL);
1672 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1673 cfi->device_type, NULL);
1674 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1675 cfi->device_type, NULL);
1676 map_write(map, CMD(0x40), chip->start + adr);
1677
1678 chip->state = FL_READY;
1679 put_chip(map, chip, adr + chip->start);
1680 ret = 0;
1681
1682out_unlock:
1683 spin_unlock(chip->mutex);
1684 return ret;
1685}
1686
1687static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
1688 unsigned long adr, int len, void *thunk)
1689{
1690 struct cfi_private *cfi = map->fldrv_priv;
1691 int ret;
1692
1693 spin_lock(chip->mutex);
1694 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
1695 if (ret)
1696 goto out_unlock;
1697 chip->state = FL_UNLOCKING;
1698
1699 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
1700 __func__, adr, len);
1701
1702 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1703 cfi->device_type, NULL);
1704 map_write(map, CMD(0x70), adr);
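 /* A single unlock cycle followed by 0x70 at the sector address clears
  * the sector's softlock; this assumes the AT49BV6416-style scheme that
  * this fixup targets. */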
1705
1706 chip->state = FL_READY;
1707 put_chip(map, chip, adr + chip->start);
1708 ret = 0;
1709
1710out_unlock:
1711 spin_unlock(chip->mutex);
1712 return ret;
1713}
1714
1715static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1716{
1717 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
1718}
1719
1720static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1721{
1722 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
1723}
1724
1725
1726static void cfi_amdstd_sync (struct mtd_info *mtd)
1727{
1728 struct map_info *map = mtd->priv;
1729 struct cfi_private *cfi = map->fldrv_priv;
1730 int i;
1731 struct flchip *chip;
1732 int ret = 0;
1733 DECLARE_WAITQUEUE(wait, current);
1734
1735 for (i=0; !ret && i<cfi->numchips; i++) {
1736 chip = &cfi->chips[i];
1737
1738 retry:
1739 spin_lock(chip->mutex);
1740
1741 switch(chip->state) {
1742 case FL_READY:
1743 case FL_STATUS:
1744 case FL_CFI_QUERY:
1745 case FL_JEDEC_QUERY:
1746 chip->oldstate = chip->state;
1747 chip->state = FL_SYNCING;
1748 /* No need to wake_up() on this state change -
1749 * as the whole point is that nobody can do anything
1750 * with the chip now anyway.
1751 */
1752 case FL_SYNCING:
1753 spin_unlock(chip->mutex);
1754 break;
1755
1756 default:
1757 /* Not an idle state */
1758 add_wait_queue(&chip->wq, &wait);
1759
1760 spin_unlock(chip->mutex);
1761
1762 schedule();
1763
1764 remove_wait_queue(&chip->wq, &wait);
1765
1766 goto retry;
1767 }
1768 }
1769
1770 /* Unlock the chips again */
1771
1772 for (i--; i >=0; i--) {
1773 chip = &cfi->chips[i];
1774
1775 spin_lock(chip->mutex);
1776
1777 if (chip->state == FL_SYNCING) {
1778 chip->state = chip->oldstate;
1779 wake_up(&chip->wq);
1780 }
1781 spin_unlock(chip->mutex);
1782 }
1783}
1784
1785
1786static int cfi_amdstd_suspend(struct mtd_info *mtd)
1787{
1788 struct map_info *map = mtd->priv;
1789 struct cfi_private *cfi = map->fldrv_priv;
1790 int i;
1791 struct flchip *chip;
1792 int ret = 0;
1793
1794 for (i=0; !ret && i<cfi->numchips; i++) {
1795 chip = &cfi->chips[i];
1796
1797 spin_lock(chip->mutex);
1798
1799 switch(chip->state) {
1800 case FL_READY:
1801 case FL_STATUS:
1802 case FL_CFI_QUERY:
1803 case FL_JEDEC_QUERY:
1804 chip->oldstate = chip->state;
1805 chip->state = FL_PM_SUSPENDED;
1806 /* No need to wake_up() on this state change -
1807 * as the whole point is that nobody can do anything
1808 * with the chip now anyway.
1809 */
1810 case FL_PM_SUSPENDED:
1811 break;
1812
1813 default:
1814 ret = -EAGAIN;
1815 break;
1816 }
1817 spin_unlock(chip->mutex);
1818 }
1819
1820 /* Unlock the chips again */
1821
1822 if (ret) {
1823 for (i--; i >=0; i--) {
1824 chip = &cfi->chips[i];
1825
1826 spin_lock(chip->mutex);
1827
1828 if (chip->state == FL_PM_SUSPENDED) {
1829 chip->state = chip->oldstate;
1830 wake_up(&chip->wq);
1831 }
1832 spin_unlock(chip->mutex);
1833 }
1834 }
1835
1836 return ret;
1837}
1838
1839
1840static void cfi_amdstd_resume(struct mtd_info *mtd)
1841{
1842 struct map_info *map = mtd->priv;
1843 struct cfi_private *cfi = map->fldrv_priv;
1844 int i;
1845 struct flchip *chip;
1846
1847 for (i=0; i<cfi->numchips; i++) {
1848
1849 chip = &cfi->chips[i];
1850
1851 spin_lock(chip->mutex);
1852
1853 if (chip->state == FL_PM_SUSPENDED) {
1854 chip->state = FL_READY;
1855 map_write(map, CMD(0xF0), chip->start);
1856 wake_up(&chip->wq);
1857 }
1858 else
1859 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
1860
1861 spin_unlock(chip->mutex);
1862 }
1863}
1864
1865static void cfi_amdstd_destroy(struct mtd_info *mtd)
1866{
1867 struct map_info *map = mtd->priv;
1868 struct cfi_private *cfi = map->fldrv_priv;
1869
1870 kfree(cfi->cmdset_priv);
1871 kfree(cfi->cfiq);
1872 kfree(cfi);
1873 kfree(mtd->eraseregions);
1874}
1875
1876MODULE_LICENSE("GPL");
1877MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
1878MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");