[MTD] NAND: Reorganize chip locking
[deliverable/linux.git] / drivers / mtd / chips / cfi_cmdset_0002.c
CommitLineData
1da177e4
LT
1/*
2 * Common Flash Interface support:
3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
4 *
5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7 *
8 * 2_by_8 routines added by Simon Munton
9 *
10 * 4_by_16 work by Carolyn J. Smith
11 *
12 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
13 *
14 * This code is GPL
15 *
22fd9a87 16 * $Id: cfi_cmdset_0002.c,v 1.116 2005/05/24 13:29:42 gleixner Exp $
1da177e4
LT
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/init.h>
26#include <asm/io.h>
27#include <asm/byteorder.h>
28
29#include <linux/errno.h>
30#include <linux/slab.h>
31#include <linux/delay.h>
32#include <linux/interrupt.h>
33#include <linux/mtd/compatmac.h>
34#include <linux/mtd/map.h>
35#include <linux/mtd/mtd.h>
36#include <linux/mtd/cfi.h>
37
38#define AMD_BOOTLOC_BUG
39#define FORCE_WORD_WRITE 0
40
41#define MAX_WORD_RETRIES 3
42
43#define MANUFACTURER_AMD 0x0001
44#define MANUFACTURER_SST 0x00BF
45#define SST49LF004B 0x0060
fb4a90bf 46#define SST49LF008A 0x005a
1da177e4
LT
47
48static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
49static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
50static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
51static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
52static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
53static void cfi_amdstd_sync (struct mtd_info *);
54static int cfi_amdstd_suspend (struct mtd_info *);
55static void cfi_amdstd_resume (struct mtd_info *);
56static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
57
58static void cfi_amdstd_destroy(struct mtd_info *);
59
60struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
61static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
62
63static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
64static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
65#include "fwh_lock.h"
66
/* Chip-driver registration record for the AMD/Fujitsu (0x0002) command set.
 * .probe is NULL because this driver is never probed directly: it is
 * entered via cfi_cmdset_0002() after the generic CFI/JEDEC probe. */
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};
73
74
75/* #define DEBUG_CFI_FEATURES */
76
77
#ifdef DEBUG_CFI_FEATURES
/* Dump the AMD/Fujitsu extended query table in human-readable form.
 * Debug-only: compiled in when DEBUG_CFI_FEATURES is defined above. */
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	/* Lookup tables for the enumerated fields; out-of-range values
	 * are reported numerically below. */
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	/* Bit 0 of SiliconRevision flags "address sensitive unlock". */
	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	/* Vpp values are BCD-ish: high nibble = volts, low nibble = tenths. */
	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
127
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
/* Pre-1.1 CFI tables carry a bogus boot-block location; guess
 * top/bottom boot from the JEDEC device ID instead. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u16 version = (extp->MajorVersion << 8) | extp->MinorVersion;

	/* Tables of version 1.1 ('1''1' == 0x3131) or later are trusted. */
	if (version >= 0x3131)
		return;

	/* CFI version 1.0 => don't trust bootloc */
	if (cfi->id & 0x80) {
		printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
		extp->TopBottom = 3;	/* top boot */
	} else {
		extp->TopBottom = 2;	/* bottom boot */
	}
}
#endif
149
150static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
151{
152 struct map_info *map = mtd->priv;
153 struct cfi_private *cfi = map->fldrv_priv;
154 if (cfi->cfiq->BufWriteTimeoutTyp) {
155 DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
156 mtd->write = cfi_amdstd_write_buffers;
157 }
158}
159
/* Route the OTP/protection-register read hooks through the SecSi
 * (security silicon) area reader on chips that provide one. */
static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}
166
167static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
168{
169 struct map_info *map = mtd->priv;
170 struct cfi_private *cfi = map->fldrv_priv;
171 if ((cfi->cfiq->NumEraseRegions == 1) &&
172 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
173 mtd->erase = cfi_amdstd_erase_chip;
174 }
175
176}
177
/* Fixups applied to chips identified by a real CFI probe; entries are
 * matched on (manufacturer, device id), CFI_*_ANY acting as wildcards. */
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }	/* terminator */
};
/* Fixups applied only to chips identified via the JEDEC probe path. */
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }	/* terminator */
};
198
/* Fixups applied unconditionally, after the CFI- or JEDEC-specific ones. */
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is like the devices id's are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ 0, 0, NULL, NULL }	/* terminator */
};
208
209
/*
 * Entry point for the AMD/Fujitsu (0x0002) command set.
 *
 * Allocates and fills an mtd_info with the default operations, reads and
 * fixes up the CFI extended query table (real CFI chips only), applies
 * JEDEC and generic fixups, seeds per-chip timing defaults from the CFI
 * query data, and hands off to cfi_amdstd_setup().
 *
 * @map:     memory map of the flash bank; map->fldrv_priv must hold the
 *           cfi_private filled in by the probe.
 * @primary: nonzero to read the primary extended table (P_ADR), zero for
 *           the alternate one (A_ADR).
 *
 * Returns the mtd_info on success, NULL on allocation/probe failure.
 */
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			/* NOTE(review): the message says "top" but 2 is bottom
			 * boot per fixup_amd_bootblock() — confirm intent. */
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming top.\n", map->name);
			bootloc = 2;
		}

		/* Broken top-boot tables list regions bottom-up; reverse them. */
		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
		/* Modify the unlock address if we are in compatibility mode */
		if (	/* x16 in x8 mode */
			((cfi->device_type == CFI_DEVICETYPE_X8) &&
				(cfi->cfiq->InterfaceDesc == 2)) ||
			/* x32 in x16 mode */
			((cfi->device_type == CFI_DEVICETYPE_X16) &&
				(cfi->cfiq->InterfaceDesc == 4)))
		{
			cfi->addr_unlock1 = 0xaaa;
			cfi->addr_unlock2 = 0x555;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	/* Seed per-chip typical timings (CFI stores them as log2 microseconds). */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
314
315
/*
 * Finish mtd_info construction: compute total size, build the erase
 * region table across all interleaved chips, and take a module reference.
 *
 * On failure frees mtd (and its region table) AND the caller-owned
 * cfi->cmdset_priv / cfi->cfiq, then returns NULL — callers must not
 * free those again.
 */
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo: high 16 bits = block size / 256,
		 * low 16 bits = number of blocks - 1. */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize ends up as the largest region's block size. */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		/* Replicate this region once per chip, offset by chip size. */
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions;i++){
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}
384
385/*
386 * Return true if the chip is ready.
387 *
388 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
389 * non-suspended sector) and is indicated by no toggle bits toggling.
390 *
391 * Note that anything more complicated than checking if no bits are toggling
392 * (including checking DQ5 for an error status) is tricky to get working
393 * correctly and is therefore not done (particulary with interleaved chips
394 * as each chip must be checked independantly of the others).
395 */
396static int chip_ready(struct map_info *map, unsigned long addr)
397{
398 map_word d, t;
399
400 d = map_read(map, addr);
401 t = map_read(map, addr);
402
403 return map_word_equal(map, d, t);
404}
405
fb4a90bf
EB
406/*
407 * Return true if the chip is ready and has the correct value.
408 *
409 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
410 * non-suspended sector) and it is indicated by no bits toggling.
411 *
412 * Error are indicated by toggling bits or bits held with the wrong value,
413 * or with bits toggling.
414 *
415 * Note that anything more complicated than checking if no bits are toggling
416 * (including checking DQ5 for an error status) is tricky to get working
417 * correctly and is therefore not done (particulary with interleaved chips
418 * as each chip must be checked independantly of the others).
419 *
420 */
421static int chip_good(struct map_info *map, unsigned long addr, map_word expected)
422{
423 map_word oldd, curd;
424
425 oldd = map_read(map, addr);
426 curd = map_read(map, addr);
427
428 return map_word_equal(map, oldd, curd) &&
429 map_word_equal(map, curd, expected);
430}
431
1da177e4
LT
/*
 * Claim a chip for an operation of type 'mode' (FL_READY, FL_POINT,
 * FL_WRITING, ...) at address 'adr'.
 *
 * Must be called with chip->mutex held; the lock is dropped and retaken
 * around delays and sleeps.  If the chip is mid-erase and the mode allows
 * it, the erase is suspended (resumed later by put_chip()).  Returns 0
 * with the chip ready for the caller's operation, or -EIO on timeout.
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				cfi_spin_unlock(chip->mutex);
				return -EIO;
			}
			cfi_spin_unlock(chip->mutex);
			cfi_udelay(1);
			cfi_spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		/* fall through: chip is now ready, same as FL_READY */

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		/* Sleep unless the mode is one we can service by suspending
		 * the erase (reads/points, or when no extended table exists).
		 * The FL_WRITING arms are dead due to the check above. */
		if (!(mode == FL_READY || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we trying to
				 * use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			cfi_spin_unlock(chip->mutex);
			cfi_udelay(1);
			cfi_spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* fall through: otherwise sleep like everyone else */

	default:
	sleep:
		/* Chip is busy: sleep until put_chip() wakes us, then
		 * re-evaluate the state machine with a fresh timeout. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		cfi_spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		cfi_spin_lock(chip->mutex);
		goto resettime;
	}
}
529
530
/*
 * Release a chip claimed by get_chip(): resume a suspended erase if one
 * was interrupted, otherwise drop Vpp, then wake any sleepers on the
 * chip's wait queue.  Called with chip->mutex held.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* 0x30 = Erase-Resume: restart the suspended erase. */
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
553
554
/*
 * Read 'len' bytes at chip-relative offset 'adr' of one chip into 'buf'.
 * Claims the chip via get_chip(), drops it back to read-array mode if
 * necessary, and copies straight out of the memory map.
 * Returns 0 on success or the get_chip() error.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		/* 0xF0 = reset: put the chip back into read-array mode. */
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	cfi_spin_unlock(chip->mutex);
	return 0;
}
585
586
587static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
588{
589 struct map_info *map = mtd->priv;
590 struct cfi_private *cfi = map->fldrv_priv;
591 unsigned long ofs;
592 int chipnum;
593 int ret = 0;
594
595 /* ofs: offset within the first chip that the first read should start */
596
597 chipnum = (from >> cfi->chipshift);
598 ofs = from - (chipnum << cfi->chipshift);
599
600
601 *retlen = 0;
602
603 while (len) {
604 unsigned long thislen;
605
606 if (chipnum >= cfi->numchips)
607 break;
608
609 if ((len + ofs -1) >> cfi->chipshift)
610 thislen = (1<<cfi->chipshift) - ofs;
611 else
612 thislen = len;
613
614 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
615 if (ret)
616 break;
617
618 *retlen += thislen;
619 len -= thislen;
620 buf += thislen;
621
622 ofs = 0;
623 chipnum++;
624 }
625 return ret;
626}
627
628
/*
 * Read from the SecSi (security silicon) area of one chip.
 *
 * Does not go through get_chip(): it busy-waits directly for FL_READY,
 * enters SecSi mode with the 0xAA/0x55/0x88 unlock sequence, copies the
 * data, and exits with 0xAA/0x55/0x90/0x00.  The command order must not
 * be changed.
 */
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY){
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		/* Sleep until whoever owns the chip wakes the queue. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	/* Enter SecSi sector mode. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	/* Exit SecSi sector mode back to normal array reads. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);

	return 0;
}
678
679static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
680{
681 struct map_info *map = mtd->priv;
682 struct cfi_private *cfi = map->fldrv_priv;
683 unsigned long ofs;
684 int chipnum;
685 int ret = 0;
686
687
688 /* ofs: offset within the first chip that the first read should start */
689
690 /* 8 secsi bytes per chip */
691 chipnum=from>>3;
692 ofs=from & 7;
693
694
695 *retlen = 0;
696
697 while (len) {
698 unsigned long thislen;
699
700 if (chipnum >= cfi->numchips)
701 break;
702
703 if ((len + ofs -1) >> 3)
704 thislen = (1<<3) - ofs;
705 else
706 thislen = len;
707
708 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
709 if (ret)
710 break;
711
712 *retlen += thislen;
713 len -= thislen;
714 buf += thislen;
715
716 ofs = 0;
717 chipnum++;
718 }
719 return ret;
720}
721
722
/*
 * Program one bus word 'datum' at chip-relative address 'adr'.
 *
 * Claims the chip, skips the program entirely if the flash already holds
 * the value (NOP), otherwise issues the unlock/program sequence, polls
 * for completion with chip_ready()/chip_good(), and retries up to
 * MAX_WORD_RETRIES after a reset on failure.  Returns 0 or -EIO.
 */
static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does have a field for
	 * maximum timeout, only for typical which can be far too short
	 * depending of the conditions.  The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	ENABLE_VPP(map);
 retry:
	/* Standard AMD unlock (0xAA/0x55) + program (0xA0) sequence,
	 * then the datum itself at the target address. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	/* Give the program its typical time before polling. */
	cfi_spin_unlock(chip->mutex);
	cfi_udelay(chip->word_write_time);
	cfi_spin_lock(chip->mutex);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			cfi_spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			cfi_spin_lock(chip->mutex);
			continue;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		cfi_spin_unlock(chip->mutex);
		cfi_udelay(1);
		cfi_spin_lock(chip->mutex);
	}
	/* Did we succeed?  Verify value, not just readiness. */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	cfi_spin_unlock(chip->mutex);

	return ret;
}
826
827
/*
 * MTD word-write entry point.  Three phases:
 *  1) read-modify-write a leading partial bus word (if 'to' is unaligned),
 *  2) write whole bus words via do_write_oneword(),
 *  3) read-modify-write a trailing partial bus word.
 * Chip boundaries are handled between words.  *retlen reports bytes written.
 */
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		cfi_spin_lock(cfi->chips[chipnum].mutex);

		/* Wait for the chip to be idle before reading old contents. */
		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			cfi_spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		cfi_spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		/* Crossed into the next chip? */
		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		cfi_spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			cfi_spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		/* Merge the trailing bytes into the old flash contents. */
		tmp_buf = map_read(map, ofs + chipstart);

		cfi_spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
968
969
970/*
971 * FIXME: interleaved mode not tested, and probably not supported!
972 */
973static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
974 unsigned long adr, const u_char *buf, int len)
975{
976 struct cfi_private *cfi = map->fldrv_priv;
977 unsigned long timeo = jiffies + HZ;
978 /* see comments in do_write_oneword() regarding uWriteTimeo. */
979 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
980 int ret = -EIO;
981 unsigned long cmd_adr;
982 int z, words;
983 map_word datum;
984
985 adr += chip->start;
986 cmd_adr = adr;
987
988 cfi_spin_lock(chip->mutex);
989 ret = get_chip(map, chip, adr, FL_WRITING);
990 if (ret) {
991 cfi_spin_unlock(chip->mutex);
992 return ret;
993 }
994
995 datum = map_word_load(map, buf);
996
997 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
998 __func__, adr, datum.x[0] );
999
1000 ENABLE_VPP(map);
1001 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1002 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1003 //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1004
1005 /* Write Buffer Load */
1006 map_write(map, CMD(0x25), cmd_adr);
1007
1008 chip->state = FL_WRITING_TO_BUFFER;
1009
1010 /* Write length of data to come */
1011 words = len / map_bankwidth(map);
1012 map_write(map, CMD(words - 1), cmd_adr);
1013 /* Write data */
1014 z = 0;
1015 while(z < words * map_bankwidth(map)) {
1016 datum = map_word_load(map, buf);
1017 map_write(map, datum, adr + z);
1018
1019 z += map_bankwidth(map);
1020 buf += map_bankwidth(map);
1021 }
1022 z -= map_bankwidth(map);
1023
1024 adr += z;
1025
1026 /* Write Buffer Program Confirm: GO GO GO */
1027 map_write(map, CMD(0x29), cmd_adr);
1028 chip->state = FL_WRITING;
1029
1030 cfi_spin_unlock(chip->mutex);
1031 cfi_udelay(chip->buffer_write_time);
1032 cfi_spin_lock(chip->mutex);
1033
1034 timeo = jiffies + uWriteTimeout;
1035
1036 for (;;) {
1037 if (chip->state != FL_WRITING) {
1038 /* Someone's suspended the write. Sleep */
1039 DECLARE_WAITQUEUE(wait, current);
1040
1041 set_current_state(TASK_UNINTERRUPTIBLE);
1042 add_wait_queue(&chip->wq, &wait);
1043 cfi_spin_unlock(chip->mutex);
1044 schedule();
1045 remove_wait_queue(&chip->wq, &wait);
1046 timeo = jiffies + (HZ / 2); /* FIXME */
1047 cfi_spin_lock(chip->mutex);
1048 continue;
1049 }
1050
1051 if (chip_ready(map, adr))
1052 goto op_done;
1053
1054 if( time_after(jiffies, timeo))
1055 break;
1056
1057 /* Latency issues. Drop the lock, wait a while and retry */
1058 cfi_spin_unlock(chip->mutex);
1059 cfi_udelay(1);
1060 cfi_spin_lock(chip->mutex);
1061 }
1062
1063 printk(KERN_WARNING "MTD %s(): software timeout\n",
1064 __func__ );
1065
1066 /* reset on all failures. */
1067 map_write( map, CMD(0xF0), chip->start );
1068 /* FIXME - should have reset delay before continuing */
1069
1070 ret = -EIO;
1071 op_done:
1072 chip->state = FL_READY;
1073 put_chip(map, chip, adr);
1074 cfi_spin_unlock(chip->mutex);
1075
1076 return ret;
1077}
1078
1079
1080static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1081 size_t *retlen, const u_char *buf)
1082{
1083 struct map_info *map = mtd->priv;
1084 struct cfi_private *cfi = map->fldrv_priv;
1085 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1086 int ret = 0;
1087 int chipnum;
1088 unsigned long ofs;
1089
1090 *retlen = 0;
1091 if (!len)
1092 return 0;
1093
1094 chipnum = to >> cfi->chipshift;
1095 ofs = to - (chipnum << cfi->chipshift);
1096
1097 /* If it's not bus-aligned, do the first word write */
1098 if (ofs & (map_bankwidth(map)-1)) {
1099 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1100 if (local_len > len)
1101 local_len = len;
1102 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1103 local_len, retlen, buf);
1104 if (ret)
1105 return ret;
1106 ofs += local_len;
1107 buf += local_len;
1108 len -= local_len;
1109
1110 if (ofs >> cfi->chipshift) {
1111 chipnum ++;
1112 ofs = 0;
1113 if (chipnum == cfi->numchips)
1114 return 0;
1115 }
1116 }
1117
1118 /* Write buffer is worth it only if more than one word to write... */
1119 while (len >= map_bankwidth(map) * 2) {
1120 /* We must not cross write block boundaries */
1121 int size = wbufsize - (ofs & (wbufsize-1));
1122
1123 if (size > len)
1124 size = len;
1125 if (size % map_bankwidth(map))
1126 size -= size % map_bankwidth(map);
1127
1128 ret = do_write_buffer(map, &cfi->chips[chipnum],
1129 ofs, buf, size);
1130 if (ret)
1131 return ret;
1132
1133 ofs += size;
1134 buf += size;
1135 (*retlen) += size;
1136 len -= size;
1137
1138 if (ofs >> cfi->chipshift) {
1139 chipnum ++;
1140 ofs = 0;
1141 if (chipnum == cfi->numchips)
1142 return 0;
1143 }
1144 }
1145
1146 if (len) {
1147 size_t retlen_dregs = 0;
1148
1149 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1150 len, &retlen_dregs, buf);
1151
1152 *retlen += retlen_dregs;
1153 return ret;
1154 }
1155
1156 return 0;
1157}
1158
1159
1160/*
1161 * Handle devices with one erase region, that only implement
1162 * the chip erase command.
1163 */
1164static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
1165{
1166 struct cfi_private *cfi = map->fldrv_priv;
1167 unsigned long timeo = jiffies + HZ;
1168 unsigned long int adr;
1169 DECLARE_WAITQUEUE(wait, current);
1170 int ret = 0;
1171
1172 adr = cfi->addr_unlock1;
1173
1174 cfi_spin_lock(chip->mutex);
1175 ret = get_chip(map, chip, adr, FL_WRITING);
1176 if (ret) {
1177 cfi_spin_unlock(chip->mutex);
1178 return ret;
1179 }
1180
1181 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1182 __func__, chip->start );
1183
1184 ENABLE_VPP(map);
1185 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1186 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1187 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1188 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1189 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1190 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1191
1192 chip->state = FL_ERASING;
1193 chip->erase_suspended = 0;
1194 chip->in_progress_block_addr = adr;
1195
1196 cfi_spin_unlock(chip->mutex);
1197 msleep(chip->erase_time/2);
1198 cfi_spin_lock(chip->mutex);
1199
1200 timeo = jiffies + (HZ*20);
1201
1202 for (;;) {
1203 if (chip->state != FL_ERASING) {
1204 /* Someone's suspended the erase. Sleep */
1205 set_current_state(TASK_UNINTERRUPTIBLE);
1206 add_wait_queue(&chip->wq, &wait);
1207 cfi_spin_unlock(chip->mutex);
1208 schedule();
1209 remove_wait_queue(&chip->wq, &wait);
1210 cfi_spin_lock(chip->mutex);
1211 continue;
1212 }
1213 if (chip->erase_suspended) {
1214 /* This erase was suspended and resumed.
1215 Adjust the timeout */
1216 timeo = jiffies + (HZ*20); /* FIXME */
1217 chip->erase_suspended = 0;
1218 }
1219
1220 if (chip_ready(map, adr))
fb4a90bf 1221 break;
1da177e4 1222
fb4a90bf
EB
1223 if (time_after(jiffies, timeo)) {
1224 printk(KERN_WARNING "MTD %s(): software timeout\n",
1225 __func__ );
1da177e4 1226 break;
fb4a90bf 1227 }
1da177e4
LT
1228
1229 /* Latency issues. Drop the lock, wait a while and retry */
1230 cfi_spin_unlock(chip->mutex);
1231 set_current_state(TASK_UNINTERRUPTIBLE);
1232 schedule_timeout(1);
1233 cfi_spin_lock(chip->mutex);
1234 }
fb4a90bf
EB
1235 /* Did we succeed? */
1236 if (!chip_good(map, adr, map_word_ff(map))) {
1237 /* reset on all failures. */
1238 map_write( map, CMD(0xF0), chip->start );
1239 /* FIXME - should have reset delay before continuing */
1da177e4 1240
fb4a90bf
EB
1241 ret = -EIO;
1242 }
1da177e4 1243
1da177e4
LT
1244 chip->state = FL_READY;
1245 put_chip(map, chip, adr);
1246 cfi_spin_unlock(chip->mutex);
1247
1248 return ret;
1249}
1250
1251
1252static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1253{
1254 struct cfi_private *cfi = map->fldrv_priv;
1255 unsigned long timeo = jiffies + HZ;
1256 DECLARE_WAITQUEUE(wait, current);
1257 int ret = 0;
1258
1259 adr += chip->start;
1260
1261 cfi_spin_lock(chip->mutex);
1262 ret = get_chip(map, chip, adr, FL_ERASING);
1263 if (ret) {
1264 cfi_spin_unlock(chip->mutex);
1265 return ret;
1266 }
1267
1268 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1269 __func__, adr );
1270
1271 ENABLE_VPP(map);
1272 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1273 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1274 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1275 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1276 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1277 map_write(map, CMD(0x30), adr);
1278
1279 chip->state = FL_ERASING;
1280 chip->erase_suspended = 0;
1281 chip->in_progress_block_addr = adr;
1282
1283 cfi_spin_unlock(chip->mutex);
1284 msleep(chip->erase_time/2);
1285 cfi_spin_lock(chip->mutex);
1286
1287 timeo = jiffies + (HZ*20);
1288
1289 for (;;) {
1290 if (chip->state != FL_ERASING) {
1291 /* Someone's suspended the erase. Sleep */
1292 set_current_state(TASK_UNINTERRUPTIBLE);
1293 add_wait_queue(&chip->wq, &wait);
1294 cfi_spin_unlock(chip->mutex);
1295 schedule();
1296 remove_wait_queue(&chip->wq, &wait);
1297 cfi_spin_lock(chip->mutex);
1298 continue;
1299 }
1300 if (chip->erase_suspended) {
1301 /* This erase was suspended and resumed.
1302 Adjust the timeout */
1303 timeo = jiffies + (HZ*20); /* FIXME */
1304 chip->erase_suspended = 0;
1305 }
1306
1307 if (chip_ready(map, adr))
fb4a90bf 1308 break;
1da177e4 1309
fb4a90bf
EB
1310 if (time_after(jiffies, timeo)) {
1311 printk(KERN_WARNING "MTD %s(): software timeout\n",
1312 __func__ );
1da177e4 1313 break;
fb4a90bf 1314 }
1da177e4
LT
1315
1316 /* Latency issues. Drop the lock, wait a while and retry */
1317 cfi_spin_unlock(chip->mutex);
1318 set_current_state(TASK_UNINTERRUPTIBLE);
1319 schedule_timeout(1);
1320 cfi_spin_lock(chip->mutex);
1321 }
fb4a90bf 1322 /* Did we succeed? */
22fd9a87 1323 if (!chip_good(map, adr, map_word_ff(map))) {
fb4a90bf
EB
1324 /* reset on all failures. */
1325 map_write( map, CMD(0xF0), chip->start );
1326 /* FIXME - should have reset delay before continuing */
1327
1328 ret = -EIO;
1329 }
1da177e4 1330
1da177e4
LT
1331 chip->state = FL_READY;
1332 put_chip(map, chip, adr);
1333 cfi_spin_unlock(chip->mutex);
1334 return ret;
1335}
1336
1337
1338int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1339{
1340 unsigned long ofs, len;
1341 int ret;
1342
1343 ofs = instr->addr;
1344 len = instr->len;
1345
1346 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1347 if (ret)
1348 return ret;
1349
1350 instr->state = MTD_ERASE_DONE;
1351 mtd_erase_callback(instr);
1352
1353 return 0;
1354}
1355
1356
1357static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1358{
1359 struct map_info *map = mtd->priv;
1360 struct cfi_private *cfi = map->fldrv_priv;
1361 int ret = 0;
1362
1363 if (instr->addr != 0)
1364 return -EINVAL;
1365
1366 if (instr->len != mtd->size)
1367 return -EINVAL;
1368
1369 ret = do_erase_chip(map, &cfi->chips[0]);
1370 if (ret)
1371 return ret;
1372
1373 instr->state = MTD_ERASE_DONE;
1374 mtd_erase_callback(instr);
1375
1376 return 0;
1377}
1378
1379
1380static void cfi_amdstd_sync (struct mtd_info *mtd)
1381{
1382 struct map_info *map = mtd->priv;
1383 struct cfi_private *cfi = map->fldrv_priv;
1384 int i;
1385 struct flchip *chip;
1386 int ret = 0;
1387 DECLARE_WAITQUEUE(wait, current);
1388
1389 for (i=0; !ret && i<cfi->numchips; i++) {
1390 chip = &cfi->chips[i];
1391
1392 retry:
1393 cfi_spin_lock(chip->mutex);
1394
1395 switch(chip->state) {
1396 case FL_READY:
1397 case FL_STATUS:
1398 case FL_CFI_QUERY:
1399 case FL_JEDEC_QUERY:
1400 chip->oldstate = chip->state;
1401 chip->state = FL_SYNCING;
1402 /* No need to wake_up() on this state change -
1403 * as the whole point is that nobody can do anything
1404 * with the chip now anyway.
1405 */
1406 case FL_SYNCING:
1407 cfi_spin_unlock(chip->mutex);
1408 break;
1409
1410 default:
1411 /* Not an idle state */
1412 add_wait_queue(&chip->wq, &wait);
1413
1414 cfi_spin_unlock(chip->mutex);
1415
1416 schedule();
1417
1418 remove_wait_queue(&chip->wq, &wait);
1419
1420 goto retry;
1421 }
1422 }
1423
1424 /* Unlock the chips again */
1425
1426 for (i--; i >=0; i--) {
1427 chip = &cfi->chips[i];
1428
1429 cfi_spin_lock(chip->mutex);
1430
1431 if (chip->state == FL_SYNCING) {
1432 chip->state = chip->oldstate;
1433 wake_up(&chip->wq);
1434 }
1435 cfi_spin_unlock(chip->mutex);
1436 }
1437}
1438
1439
1440static int cfi_amdstd_suspend(struct mtd_info *mtd)
1441{
1442 struct map_info *map = mtd->priv;
1443 struct cfi_private *cfi = map->fldrv_priv;
1444 int i;
1445 struct flchip *chip;
1446 int ret = 0;
1447
1448 for (i=0; !ret && i<cfi->numchips; i++) {
1449 chip = &cfi->chips[i];
1450
1451 cfi_spin_lock(chip->mutex);
1452
1453 switch(chip->state) {
1454 case FL_READY:
1455 case FL_STATUS:
1456 case FL_CFI_QUERY:
1457 case FL_JEDEC_QUERY:
1458 chip->oldstate = chip->state;
1459 chip->state = FL_PM_SUSPENDED;
1460 /* No need to wake_up() on this state change -
1461 * as the whole point is that nobody can do anything
1462 * with the chip now anyway.
1463 */
1464 case FL_PM_SUSPENDED:
1465 break;
1466
1467 default:
1468 ret = -EAGAIN;
1469 break;
1470 }
1471 cfi_spin_unlock(chip->mutex);
1472 }
1473
1474 /* Unlock the chips again */
1475
1476 if (ret) {
1477 for (i--; i >=0; i--) {
1478 chip = &cfi->chips[i];
1479
1480 cfi_spin_lock(chip->mutex);
1481
1482 if (chip->state == FL_PM_SUSPENDED) {
1483 chip->state = chip->oldstate;
1484 wake_up(&chip->wq);
1485 }
1486 cfi_spin_unlock(chip->mutex);
1487 }
1488 }
1489
1490 return ret;
1491}
1492
1493
1494static void cfi_amdstd_resume(struct mtd_info *mtd)
1495{
1496 struct map_info *map = mtd->priv;
1497 struct cfi_private *cfi = map->fldrv_priv;
1498 int i;
1499 struct flchip *chip;
1500
1501 for (i=0; i<cfi->numchips; i++) {
1502
1503 chip = &cfi->chips[i];
1504
1505 cfi_spin_lock(chip->mutex);
1506
1507 if (chip->state == FL_PM_SUSPENDED) {
1508 chip->state = FL_READY;
1509 map_write(map, CMD(0xF0), chip->start);
1510 wake_up(&chip->wq);
1511 }
1512 else
1513 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
1514
1515 cfi_spin_unlock(chip->mutex);
1516 }
1517}
1518
1519static void cfi_amdstd_destroy(struct mtd_info *mtd)
1520{
1521 struct map_info *map = mtd->priv;
1522 struct cfi_private *cfi = map->fldrv_priv;
1523 kfree(cfi->cmdset_priv);
1524 kfree(cfi->cfiq);
1525 kfree(cfi);
1526 kfree(mtd->eraseregions);
1527}
1528
1529static char im_name[]="cfi_cmdset_0002";
1530
1531
1532static int __init cfi_amdstd_init(void)
1533{
1534 inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
1535 return 0;
1536}
1537
1538
1539static void __exit cfi_amdstd_exit(void)
1540{
1541 inter_module_unregister(im_name);
1542}
1543
1544
1545module_init(cfi_amdstd_init);
1546module_exit(cfi_amdstd_exit);
1547
1548MODULE_LICENSE("GPL");
1549MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
1550MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");