[PATCH] kfree cleanup: drivers/mtd
drivers/mtd/chips/cfi_cmdset_0002.c
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 *
 * $Id: cfi_cmdset_0002.c,v 1.118 2005/07/04 22:34:29 gleixner Exp $
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD 0x0001
#define MANUFACTURER_SST 0x00BF
#define SST49LF004B 0x0060
#define SST49LF008A 0x005a

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char *erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char *top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n");
		mtd->write = cfi_amdstd_write_buffers;
	}
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common.  It looks like the device IDs are as well.  This
	 * table picks up all the cases where we know that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ 0, 0, NULL, NULL }
};
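
/*
 * How the tables above get consumed (a sketch, based on the cfi_fixup()
 * helper declared in include/linux/mtd/cfi.h): cfi_fixup() walks a table
 * and, for every entry whose mfr/id pair matches the probed chip
 * (CFI_MFR_ANY and CFI_ID_ANY acting as wildcards), calls
 * entry->fixup(mtd, entry->param).  Roughly:
 *
 *	for (f = table; f->fixup; f++)
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *			f->fixup(mtd, f->param);
 */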


struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd *)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming bottom.\n", map->name);
			bootloc = 2;	/* bottom boot */
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
		/* Modify the unlock address if we are in compatibility mode */
		if (	/* x16 in x8 mode */
			((cfi->device_type == CFI_DEVICETYPE_X8) &&
			 (cfi->cfiq->InterfaceDesc == 2)) ||
			/* x32 in x16 mode */
			((cfi->device_type == CFI_DEVICETYPE_X16) &&
			 (cfi->cfiq->InterfaceDesc == 4)))
		{
			cfi->addr_unlock1 = 0xaaa;
			cfi->addr_unlock2 = 0x555;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
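
/*
 * How this entry point is normally reached (a sketch, not part of this
 * file): a board map driver probes its flash window and the generic CFI
 * probe dispatches to this command-set handler through the inter_module
 * name registered at the bottom of this file.  Something like:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &board_map);
 *	if (mtd)
 *		add_mtd_partitions(mtd, board_parts, nr_parts);
 *
 * do_map_probe() and add_mtd_partitions() are the usual helpers from
 * linux/mtd/map.h and linux/mtd/partitions.h.
 */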


static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions; i++) {
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				spin_unlock(chip->mutex);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		if (!(mode == FL_READY || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased.  However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
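
/*
 * get_chip()/put_chip() usage pattern (every caller in this file follows
 * this shape; get_chip() may drop and retake the mutex internally while
 * it sleeps):
 *
 *	spin_lock(chip->mutex);
 *	ret = get_chip(map, chip, adr, FL_WRITING);
 *	if (ret) {
 *		spin_unlock(chip->mutex);
 *		return ret;
 *	}
 *	... issue commands, poll for completion ...
 *	put_chip(map, chip, adr);
 *	spin_unlock(chip->mutex);
 */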


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with
 * the same chip state as it was entered with.  This is why there is no
 * need to worry about the presence of add_wait_queue() or schedule() calls
 * from within a couple of xip_disable()'d areas of code, like in
 * do_erase_oneblock for example.  The queueing and scheduling always
 * happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the
 * flash in array mode, therefore never executing many cases therein and
 * not causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip at which the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY) {
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if (signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

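	/* Enter the SecSi (Secured Silicon) sector region: the two unlock
	 * cycles followed by the 0x88 entry command. */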
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

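	/* Leave the SecSi sector region again: two unlock cycles, then
	 * 0x90 followed by 0x00. */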
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;


	/* ofs: offset within the first chip at which the first read should start */

	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usec).  However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		      __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
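	/* Standard AMD three-cycle program command: 0xAA at unlock address
	 * 1, 0x55 at unlock address 2, 0xA0 at unlock address 1, then the
	 * datum itself written to the target address. */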
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write.  Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

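	/* Buffered write sequence: the two unlock cycles, then 0x25 (write
	 * buffer load), a word count, the data words themselves, and
	 * finally 0x29 (buffer program confirm). */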
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while (z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write.  Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		if (time_after(jiffies, timeo))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write(map, CMD(0xF0), chip->start);
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__);

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
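	/* MaxBufWriteSize is the log2 of the per-chip write buffer size in
	 * bytes, so e.g. a single x16 chip reporting MaxBufWriteSize == 5
	 * gives wbufsize = 1 << 5 = 32 bytes (scaled up here by the
	 * interleave). */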
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* The write buffer is worth it only if there is more than one
	   word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

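	/* Six-cycle chip erase: two unlock cycles, 0x80 (erase setup),
	 * two more unlock cycles, then 0x10 (chip erase) at the unlock
	 * address. */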
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

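	/* Six-cycle sector erase: the same five lead-in cycles as chip
	 * erase, but the final 0x30 is written to the address of the
	 * sector being erased. */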
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

static char im_name[] = "cfi_cmdset_0002";


static int __init cfi_amdstd_init(void)
{
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
	return 0;
}


static void __exit cfi_amdstd_exit(void)
{
	inter_module_unregister(im_name);
}


module_init(cfi_amdstd_init);
module_exit(cfi_amdstd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");