mtd: cfi_cmdset_0002: do not fail on no extended query table as they are both optional
drivers/mtd/chips/cfi_cmdset_0002.c

/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B	0x0060
#define SST49LF040B	0x0050
#define SST49LF008A	0x005a
#define AT49BV6416	0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			DEBUG(MTD_DEBUG_LEVEL1,
				"%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n");
		mtd->write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}

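/*
 * Note on the two fixups above: the low 16 bits of EraseRegionInfo hold
 * "number of sectors - 1".  For the S29GL064N, 0x003f claims 64 sectors;
 * OR-ing in 0x0040 yields 0x007f, i.e. the 128 sectors the chip really
 * has.  The S29GL032N case is the inverse: 0x007e claims 127 sectors,
 * and clearing bit 6 gives 0x003e, i.e. the correct 63 sectors.
 */
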
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common.  It looks like the device IDs are as well.  This
	 * table picks all the cases where we know that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};
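
/*
 * cfi_fixup() (declared in include/linux/mtd/cfi.h) walks one of the
 * tables above and invokes every entry whose manufacturer and device
 * IDs match the probed chip; CFI_MFR_ANY and CFI_ID_ANY act as
 * wildcards, and a { 0, 0, NULL, NULL } entry terminates the table.
 */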


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
	    extp->MajorVersion == '0')
		extp->MajorVersion = '1';
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			if (extp->MajorVersion != '1' ||
			    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c.\n", extp->MajorVersion,
				       extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

			bootloc = extp->TopBottom;
			if ((bootloc != 2) && (bootloc != 3)) {
				printk(KERN_WARNING "%s: CFI does not contain boot "
				       "bank location. Assuming bottom.\n", map->name);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
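
/*
 * Typical use: a map driver probes the chip and, when the CFI primary
 * vendor command set is 0x0002, the probe core ends up calling
 * cfi_cmdset_0002() above.  A minimal sketch, assuming a physmap-style
 * board map; the name, physical address and size below are made-up
 * values for illustration only:
 *
 *	static struct map_info board_map = {
 *		.name      = "board-nor",	// hypothetical board
 *		.size      = 0x800000,		// 8 MiB
 *		.bankwidth = 2,			// 16-bit bus
 *		.phys      = 0x10000000,
 *	};
 *
 *	board_map.virt = ioremap(board_map.phys, board_map.size);
 *	simple_map_init(&board_map);
 *	mtd = do_map_probe("cfi_probe", &board_map);
 *	if (mtd)
 *		add_mtd_device(mtd);
 */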

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

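	/*
	 * Each CFI EraseRegionInfo word encodes the region geometry:
	 * bits 31..16 hold (block size / 256) and bits 15..0 hold
	 * (number of blocks - 1).  For example, 0x0100007f describes
	 * 128 blocks of 64 KiB each (0x10000 / 256 = 0x0100).
	 */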
	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions;i++){
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by bits toggling, or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}
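
/*
 * Background for the two helpers above: while an AMD-style chip is busy
 * programming or erasing, a read of a busy address returns status rather
 * than data, and the DQ6 "toggle bit" flips on every read.  Two
 * back-to-back reads therefore differ while the operation is running and
 * match once the chip is back in read (array) mode.
 */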

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress
				 * sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with
 * the same chip state as it was entered with.  This is why there is no
 * care for the presence of add_wait_queue() or schedule() calls from
 * within a couple xip_disable()'d areas of code, like in
 * do_erase_oneblock for example.  The queueing and scheduling are always
 * happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif
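
/*
 * In both variants above, UDELAY() drops chip->mutex for the duration of
 * the wait, so another thread may grab the chip and, via get_chip(),
 * suspend the operation in progress; the polling loops below all re-check
 * chip->state after each UDELAY() for exactly that reason.
 */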

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;


	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usecs).  However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
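	/*
	 * Worked example: with HZ = 1000, HZ/1000 + 1 = 2 jiffies = 2 ms;
	 * with HZ = 100 the integer division gives 0 and the '+ 1' still
	 * leaves one full jiffy (10 ms) of timeout.
	 */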
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
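	/*
	 * Standard AMD/JEDEC word-program sequence: two unlock cycles
	 * (0xAA to the first unlock address, 0x55 to the second), the
	 * 0xA0 program command, then the datum written to its
	 * destination address.
	 */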
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

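	/*
	 * Buffered-write protocol: after the two unlock cycles, the Write
	 * Buffer Load command (0x25) is issued at the sector address,
	 * followed by a word count of (words - 1), the data words
	 * themselves, and finally the Write Buffer Program Confirm command
	 * (0x29) which starts the actual programming.
	 */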
1da177e4
LT
1333 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1334 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1335 //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1336
1337 /* Write Buffer Load */
1338 map_write(map, CMD(0x25), cmd_adr);
1339
1340 chip->state = FL_WRITING_TO_BUFFER;
1341
1342 /* Write length of data to come */
1343 words = len / map_bankwidth(map);
1344 map_write(map, CMD(words - 1), cmd_adr);
1345 /* Write data */
1346 z = 0;
1347 while(z < words * map_bankwidth(map)) {
1348 datum = map_word_load(map, buf);
1349 map_write(map, datum, adr + z);
1350
1351 z += map_bankwidth(map);
1352 buf += map_bankwidth(map);
1353 }
1354 z -= map_bankwidth(map);
1355
1356 adr += z;
1357
1358 /* Write Buffer Program Confirm: GO GO GO */
1359 map_write(map, CMD(0x29), cmd_adr);
1360 chip->state = FL_WRITING;
1361
02b15e34
TP
1362 INVALIDATE_CACHE_UDELAY(map, chip,
1363 adr, map_bankwidth(map),
1364 chip->word_write_time);
1da177e4 1365
1f948b43
TG
1366 timeo = jiffies + uWriteTimeout;
1367
1da177e4
LT
1368 for (;;) {
1369 if (chip->state != FL_WRITING) {
1370 /* Someone's suspended the write. Sleep */
1371 DECLARE_WAITQUEUE(wait, current);
1372
1373 set_current_state(TASK_UNINTERRUPTIBLE);
1374 add_wait_queue(&chip->wq, &wait);
c4e77376 1375 mutex_unlock(&chip->mutex);
1da177e4
LT
1376 schedule();
1377 remove_wait_queue(&chip->wq, &wait);
1378 timeo = jiffies + (HZ / 2); /* FIXME */
c4e77376 1379 mutex_lock(&chip->mutex);
1da177e4
LT
1380 continue;
1381 }
1382
b95f9609
KB
1383 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1384 break;
1385
02b15e34
TP
1386 if (chip_ready(map, adr)) {
1387 xip_enable(map, chip, adr);
1da177e4 1388 goto op_done;
02b15e34 1389 }
1da177e4
LT
1390
1391 /* Latency issues. Drop the lock, wait a while and retry */
02b15e34 1392 UDELAY(map, chip, adr, 1);
1da177e4
LT
1393 }
1394
1da177e4
LT
1395 /* reset on all failures. */
1396 map_write( map, CMD(0xF0), chip->start );
02b15e34 1397 xip_enable(map, chip, adr);
1da177e4
LT
1398 /* FIXME - should have reset delay before continuing */
1399
02b15e34
TP
1400 printk(KERN_WARNING "MTD %s(): software timeout\n",
1401 __func__ );
1402
1da177e4
LT
1403 ret = -EIO;
1404 op_done:
1405 chip->state = FL_READY;
1406 put_chip(map, chip, adr);
c4e77376 1407 mutex_unlock(&chip->mutex);
1da177e4
LT
1408
1409 return ret;
1410}


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

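	/*
	 * Chip Erase sequence: two unlock cycles, the 0x80 erase setup
	 * command, two more unlock cycles, then 0x10 (erase entire chip)
	 * written to the unlock address.
	 */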
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

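	/*
	 * Sector Erase sequence: identical to chip erase up to the 0x80
	 * setup command, but it ends with 0x30 written directly to the
	 * address of the sector to be erased.
	 */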
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}

KC
1931
1932/*
1933 * Ensure that the flash device is put back into read array mode before
1934 * unloading the driver or rebooting. On some systems, rebooting while
1935 * the flash is in query/program/erase mode will prevent the CPU from
1936 * fetching the bootloader code, requiring a hard reset or power cycle.
1937 */
1938static int cfi_amdstd_reset(struct mtd_info *mtd)
1939{
1940 struct map_info *map = mtd->priv;
1941 struct cfi_private *cfi = map->fldrv_priv;
1942 int i, ret;
1943 struct flchip *chip;
1944
1945 for (i = 0; i < cfi->numchips; i++) {
1946
1947 chip = &cfi->chips[i];
1948
1949 mutex_lock(&chip->mutex);
1950
1951 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
1952 if (!ret) {
1953 map_write(map, CMD(0xF0), chip->start);
1954 chip->state = FL_SHUTDOWN;
1955 put_chip(map, chip, chip->start);
1956 }
1957
1958 mutex_unlock(&chip->mutex);
1959 }
1960
1961 return 0;
1962}
1963
1964
1965static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
1966 void *v)
1967{
1968 struct mtd_info *mtd;
1969
1970 mtd = container_of(nb, struct mtd_info, reboot_notifier);
1971 cfi_amdstd_reset(mtd);
1972 return NOTIFY_DONE;
1973}
1974
1975

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");