/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.178 2005/05/19 17:05:43 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080	0x002F
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:           supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
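	/* CFI encodes each erase region word as (number of blocks - 1) in
	 * the low 16 bits and (block size / 256) in the high 16 bits, so
	 * forcing the low word to 0x3e makes this region report 63 blocks. */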
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It appears that the device IDs are
	 * common as well.  This table picks up the cases where
	 * we know that to be so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
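
#if 0
/*
 * Illustrative sketch only (not built): roughly how a fixup walker such
 * as the generic cfi_fixup() is expected to consume the tables above --
 * walk the entries until the all-NULL sentinel and invoke each fixup
 * whose manufacturer and device IDs match the probed chip or are the
 * CFI_MFR_ANY/CFI_ID_ANY wildcards.  The real implementation lives in
 * the generic CFI code; this is just a reading aid.
 */
static void example_apply_fixups(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f = fixups; f->fixup; f++) {
		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
			f->fixup(mtd, f->param);
	}
}
#endif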

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 6;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

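/*
 * For reference, the variable-size extp->extra[] area parsed above is,
 * for extension-table version 1.3:
 *
 *	(NumProtectionFields - 1) * sizeof(struct cfi_intelext_otpinfo)
 *	6 bytes of burst read info
 *	1 byte:  number of hardware-partition regions (nb_parts)
 *	per region: a struct cfi_intelext_regioninfo followed by
 *	            (NumBlockTypes - 1) * sizeof(struct cfi_intelext_blockinfo)
 */
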
/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += 6;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

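/*
 * Worked example of the arithmetic above (illustrative numbers only): a
 * 64 MiB chip has chipshift = 26; if the extension table reports
 * numparts = 4 hardware partitions, partshift = 26 - __ffs(4) = 24, so
 * each virtual chip covers 1 << 24 = 16 MiB.  With cfi->numchips = 2
 * real chips, numvirtchips = 2 * 4 = 8 flchip structures are created.
 */
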
/*
 *  *********** CHIP ACCESS FUNCTIONS  ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
				       status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
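		/* Fall through: once the status register reports ready, the
		 * chip can be treated exactly as in the FL_READY case. */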

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no need to
 * worry about the presence of add_wait_queue() or schedule() calls from
 * within a couple of xip_disable()'d areas of code, like in
 * do_erase_oneblock for example.  The queueing and scheduling always happen
 * within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), where the flash is
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		if (chipnum >= cfi->numchips)
			break;
		chip = &cfi->chips[chipnum];

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:   write_cmd = CMD(0x40); break;
	case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
	default: return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, adr, 1);
	}
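	/* Adapt the polling estimate: if the write completed with no extra
	 * polling, shorten the assumed per-word programming delay (never
	 * below 1); if more than one extra poll was needed, lengthen it. */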
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, CMD(0xe8), cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		UDELAY(map, chip, cmd_adr, 1);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);
	words = len / map_bankwidth(map);
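	/* The count written with the E8 command is (number of write cycles
	 * - 1): a trailing partial word is padded and costs one extra
	 * cycle, so the count is words - 1 when bytes == 0, else words. */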
	map_write(map, CMD(words - !bytes), cmd_adr );

	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				cmd_adr, len,
				chip->buffer_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, cmd_adr, 1);
	}
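	/* Same adaptive estimate as in do_write_oneword: tune the typical
	 * buffer-program delay according to how much polling was needed. */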
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	return 0;
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*1000/2);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			/* Clear status bits */
			map_write(map, CMD(0x50), adr);
			map_write(map, CMD(0x70), adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
			       adr, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for error bits: SR.1 (lock), SR.3 (VPP), SR.4/SR.5 (failure) */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus;

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		chipstatus = MERGESTATUS(status);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%lx\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%lx\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				put_chip(map, chip, adr);
				spin_unlock(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx\n", adr, chipstatus);
			ret = -EIO;
		}
	} else {
		xip_enable(map, chip, adr);
		ret = 0;
	}

 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	int ret;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

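	/* 0x60 is the Block Lock setup command; the following 0x01
	   (set lock bit) or 0xD0 (clear lock bits) cycle confirms it. */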
	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking is supported (bit 5 of the
	 * extended query FeatureSupport field), there is no need to delay.
	 */
	if (!extp || !(extp->FeatureSupport & (1 << 5)))
		UDELAY(map, chip, adr, 1000000/HZ);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for lock/unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
			       status.x[0], Xstatus.x[0]);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

#ifdef CONFIG_MTD_OTP

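/*
 * Intel-style parts expose a factory-programmed protection register
 * group plus one or more user-programmable OTP groups, reachable in
 * read-identifier (0x90) mode at the addresses given by the extended
 * query table.  Each group has a bit in the protection lock word; once
 * that bit is cleared the group can never be written again.
 */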
typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);
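/*
 * An otp_op_t (implemented by do_otp_read/write/lock below) performs one
 * primitive on an OTP region: data_offset/size locate the data within the
 * chip, prot_offset is the protection lock word, and groupno/groupsize
 * identify the OTP group being operated on.
 */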

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
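	/* 0x90 (Read Identifier Codes) maps the protection registers into
	   the address space alongside the device ID and lock information. */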
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

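	/*
	 * Flash programming can only clear bits, so padding the bus word
	 * with 0xff leaves the bytes outside the write span unchanged;
	 * that is what makes unaligned OTP writes safe here.
	 */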
	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

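	/* Locking is itself an OTP write: clearing bit <grpno> in the
	   protection lock word locks that group for good. */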
	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 * Check the remaining space before
					 * subtracting, so an undersized len
					 * can't wrap the unsigned size_t.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}

static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
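	/* GNU "?:" extension: return the error code if non-zero, else the
	   number of bytes the NULL-action walk stored through *retlen. */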
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif

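/*
 * Power management: park every idle chip in FL_PM_SUSPENDED.  If any
 * chip has an operation pending or is otherwise busy, the suspend as a
 * whole fails with -EAGAIN and the chips already parked are released.
 */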
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			/* fall through */
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}

static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_READY;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";

static int __init cfi_intelext_init(void)
{
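	/* Chips reporting command set 0003 (Intel "performance code")
	   are driven by this same implementation, hence the second
	   registration below also pointing at cfi_cmdset_0001. */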
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}

static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");