cfi_cmdset_0001: factorize code to wait for flash status
drivers/mtd/chips/cfi_cmdset_0001.c

/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"


/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device IDs are
	 * as well.  This table picks all the cases where we
	 * know that to be so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
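
/*
 * For reference (a summary, not new behaviour): cfi_fixup(), in
 * drivers/mtd/chips/cfi_util.c, walks a table like the ones above and
 * calls every entry whose manufacturer and device IDs match the probed
 * chip.  CFI_MFR_ANY and CFI_ID_ANY act as wildcards, and the zeroed
 * entry terminates the table.
 */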

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
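
/*
 * Summary of the v1.3+ variable-size tail walked above (offsets are into
 * extp->extra[], past the fixed cfi_pri_intelext fields): any additional
 * OTP protection-field descriptors, a burst read info block whose length
 * byte is read out of the block itself, a one-byte count of hardware
 * partition regions, a partregion size field skipped on CFI 1.4, then one
 * cfi_intelext_regioninfo (with trailing cfi_intelext_blockinfo entries)
 * per region, and on CFI 1.4 a cfi_intelext_programming_regioninfo.  The
 * buffer is re-read with a larger extp_size until the whole tail fits.
 */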

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
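
/* Vendor command sets 0x0003 (Intel Standard) and 0x0200 (Intel
 * Performance Code) are handled by the very same code as the Intel/Sharp
 * Extended set 0x0001, hence the gcc function aliases above. */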

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point.  This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags |= MTD_PROGRAM_REGIONS;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, MTD_PROGREGION_SIZE(mtd),
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
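
/*
 * Worked example (illustrative numbers only): a 32MiB chip
 * (cfi->chipshift = 25) reporting numparts = 4 identical hardware
 * partitions gives partshift = 25 - __ffs(4) = 23, i.e. 8MiB per
 * virtual chip; with cfi->numchips = 2 interleaved sets this yields
 * numvirtchips = 8 flchip structures sharing 2 flchip_shared locks.
 */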

/*
 *  *********** CHIP ACCESS FUNCTIONS  ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. There is the possibility of contention on the
		 * write/erase operations which are global to the real
		 * chip and not per partition.  So let's fight it over in
		 * the partition which currently has authority on the
		 * operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
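
/*
 * Locking contract, as implemented above: get_chip() is called and
 * returns with chip->mutex held.  A return of 0 means the chip is now
 * usable for the requested mode (possibly after suspending an erase,
 * recorded in chip->oldstate); -EIO means the chip never reported ready
 * status.  Every successful get_chip() must be paired with put_chip(),
 * which resumes any suspended operation and hands write ownership back
 * to the partition it was borrowed from.
 */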

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is
 * an interrupt pending then the flash erase or write operation is
 * suspended, array mode restored and interrupts unmasked.  Task scheduling
 * might also happen at that point.  The CPU eventually returns from the
 * interrupt or the call to schedule() and the suspended flash operation
 * is resumed for the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, int *chip_op_time )
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = *chip_op_time * 8;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way
 * with an XIP setup so do it before the actual flash operation in this
 * case and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
	xip_wait_for_operation(map, chip, cmd_adr, p_usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		int *chip_op_time )
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int z, chip_state = chip->state;
	unsigned long timeo;

	spin_unlock(chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	if (*chip_op_time)
		cfi_udelay(*chip_op_time);
	spin_lock(chip->mutex);

	timeo = *chip_op_time * 8 * HZ / 1000000;
	if (timeo < HZ/2)
		timeo = HZ/2;
	timeo += jiffies;

	z = 0;
	for (;;) {
		if (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		spin_unlock(chip->mutex);
		cfi_udelay(1);
		spin_lock(chip->mutex);
	}

	if (!z) {
		if (!--(*chip_op_time))
			*chip_op_time = 1;
	} else if (z > 1)
		++(*chip_op_time);

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}
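
/*
 * Note on the z bookkeeping above: the stored typical operation time is
 * self-tuning.  If the status poll succeeded right after the initial
 * udelay (z == 0) the time is decremented (floored at 1); if more than
 * one extra 1us polling loop was needed (z > 1) it is incremented, so
 * repeated operations converge on the chip's actual speed.
 */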

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
	({ int __udelay = (udelay); \
	   INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
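
/*
 * WAIT_TIMEOUT() is the degenerate form of INVAL_CACHE_AND_WAIT(): no
 * cache range to invalidate, just wait for the status register to report
 * ready, using 'udelay' as the typical operation time (0 selects the
 * implementation's default timeout).
 */
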
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret=0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   &chip->word_write_time);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
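
/*
 * The CMD(0x1a) mask above checks status register bits SR.4 (program
 * error), SR.3 (VPP low) and SR.1 (block locked) per the Intel status
 * register layout; the erase path additionally checks SR.5 via 0x3a.
 * MERGESTATUS() folds the per-chip status of an interleaved map into a
 * single value before the bits are decoded into errnos.
 */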


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
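
/*
 * Worked example (illustrative): with map_bankwidth(map) == 4, writing
 * len = 5 at ofs = 3 first builds a 0xff-padded word and programs the
 * single leading byte (gap = 3, n = 1), then programs one full aligned
 * word; nothing is left over for the trailing partial-word case.
 */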


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	/* 'adr' and 'len' are consumed by the copy loop below, so latch
	   the written range up front for the final cache invalidation. */
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
		       map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr );

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   &chip->buffer_write_time);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
1672
1da177e4
LT
1673static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1674 unsigned long adr, int len, void *thunk)
1675{
1676 struct cfi_private *cfi = map->fldrv_priv;
c172471b 1677 map_word status;
1da177e4 1678 int retries = 3;
c172471b 1679 int ret;
1da177e4
LT
1680
1681 adr += chip->start;
1682
1da177e4
LT
1683 retry:
1684 spin_lock(chip->mutex);
1685 ret = get_chip(map, chip, adr, FL_ERASING);
1686 if (ret) {
1687 spin_unlock(chip->mutex);
1688 return ret;
1689 }
1690
1691 XIP_INVAL_CACHED_RANGE(map, adr, len);
1692 ENABLE_VPP(map);
1693 xip_disable(map, chip, adr);
1694
1695 /* Clear the status register first */
1696 map_write(map, CMD(0x50), adr);
1697
1698 /* Now erase */
1699 map_write(map, CMD(0x20), adr);
1700 map_write(map, CMD(0xD0), adr);
1701 chip->state = FL_ERASING;
1702 chip->erase_suspended = 0;
1703
c172471b
NP
1704 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1705 adr, len,
1706 &chip->erase_time);
1707 if (ret) {
1708 map_write(map, CMD(0x70), adr);
1709 chip->state = FL_STATUS;
1710 xip_enable(map, chip, adr);
1711 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1712 goto out;
1da177e4
LT
1713 }
1714
1715 /* We've broken this before. It doesn't hurt to be safe */
1716 map_write(map, CMD(0x70), adr);
1717 chip->state = FL_STATUS;
1718 status = map_read(map, adr);
1719
1720 /* check for errors */
1721 if (map_word_bitsset(map, status, CMD(0x3a))) {
1722 unsigned long chipstatus = MERGESTATUS(status);
1723
1724 /* Reset the error bits */
1725 map_write(map, CMD(0x50), adr);
1726 map_write(map, CMD(0x70), adr);
1727 xip_enable(map, chip, adr);
1728
1729 if ((chipstatus & 0x30) == 0x30) {
1730 printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1731 ret = -EINVAL;
1732 } else if (chipstatus & 0x02) {
1733 /* Protection bit set */
1734 ret = -EROFS;
1735 } else if (chipstatus & 0x8) {
1736 /* Voltage */
1737 printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1738 ret = -EIO;
1739 } else if (chipstatus & 0x20 && retries--) {
1740 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1741 put_chip(map, chip, adr);
1742 spin_unlock(chip->mutex);
1743 goto retry;
1744 } else {
1745 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1746 ret = -EIO;
1747 }
1748
1749 goto out;
1750 }
1751
1752 xip_enable(map, chip, adr);
1753 out: put_chip(map, chip, adr);
1754 spin_unlock(chip->mutex);
1755 return ret;
1756}
1757
1758int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1759{
1760 unsigned long ofs, len;
1761 int ret;
1762
1763 ofs = instr->addr;
1764 len = instr->len;
1765
1766 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1767 if (ret)
1768 return ret;
1769
1770 instr->state = MTD_ERASE_DONE;
1771 mtd_erase_callback(instr);
1772
1773 return 0;
1774}
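/*
 * Editor's note: a sketch of the usual caller-side idiom for the erase
 * path above (compare mtdchar's MEMERASE handling): fill an erase_info,
 * submit it through mtd->erase(), and sleep until mtd_erase_callback()
 * reports MTD_ERASE_DONE or MTD_ERASE_FAILED. Names are invented for
 * the example.
 */
static void example_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

static int example_erase_range(struct mtd_info *mtd, u32 ofs, u32 len)
{
	wait_queue_head_t waitq;
	struct erase_info instr;
	int ret;

	init_waitqueue_head(&waitq);
	memset(&instr, 0, sizeof(instr));
	instr.mtd = mtd;
	instr.addr = ofs;
	instr.len = len;
	instr.callback = example_erase_callback;
	instr.priv = (u_long)&waitq;

	ret = mtd->erase(mtd, &instr);
	if (!ret)
		wait_event(waitq, instr.state == MTD_ERASE_DONE ||
				  instr.state == MTD_ERASE_FAILED);
	return ret;
}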
1775
1776static void cfi_intelext_sync (struct mtd_info *mtd)
1777{
1778 struct map_info *map = mtd->priv;
1779 struct cfi_private *cfi = map->fldrv_priv;
1780 int i;
1781 struct flchip *chip;
1782 int ret = 0;
1783
1784 for (i=0; !ret && i<cfi->numchips; i++) {
1785 chip = &cfi->chips[i];
1786
1787 spin_lock(chip->mutex);
1788 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1789
1790 if (!ret) {
1791 chip->oldstate = chip->state;
1792 chip->state = FL_SYNCING;
1793 /* No need to wake_up() on this state change -
1794 * as the whole point is that nobody can do anything
1795 * with the chip now anyway.
1796 */
1797 }
1798 spin_unlock(chip->mutex);
1799 }
1800
1801 /* Unlock the chips again */
1802
1803 for (i--; i >=0; i--) {
1804 chip = &cfi->chips[i];
1805
1806 spin_lock(chip->mutex);
1807
1808 if (chip->state == FL_SYNCING) {
1809 chip->state = chip->oldstate;
1810 chip->oldstate = FL_READY;
1811 wake_up(&chip->wq);
1812 }
1813 spin_unlock(chip->mutex);
1814 }
1815}
1816
1817#ifdef DEBUG_LOCK_BITS
1818static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1819 struct flchip *chip,
1820 unsigned long adr,
1821 int len, void *thunk)
1822{
1823 struct cfi_private *cfi = map->fldrv_priv;
1824 int status, ofs_factor = cfi->interleave * cfi->device_type;
1825
1826 adr += chip->start;
1827 xip_disable(map, chip, adr+(2*ofs_factor));
1828 map_write(map, CMD(0x90), adr+(2*ofs_factor));
1829 chip->state = FL_JEDEC_QUERY;
1830 status = cfi_read_query(map, adr+(2*ofs_factor));
1831 xip_enable(map, chip, 0);
1832 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1833 adr, status);
1834 return 0;
1835}
1836#endif
1837
1838#define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1839#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
1840
1841static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1842 unsigned long adr, int len, void *thunk)
1843{
1844 struct cfi_private *cfi = map->fldrv_priv;
1845 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1846 int udelay;
1847 int ret;
1848
1849 adr += chip->start;
1850
1851 spin_lock(chip->mutex);
1852 ret = get_chip(map, chip, adr, FL_LOCKING);
1853 if (ret) {
1854 spin_unlock(chip->mutex);
1855 return ret;
1856 }
1857
1858 ENABLE_VPP(map);
1859 xip_disable(map, chip, adr);
1860
1861 map_write(map, CMD(0x60), adr);
1862 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1863 map_write(map, CMD(0x01), adr);
1864 chip->state = FL_LOCKING;
1865 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1866 map_write(map, CMD(0xD0), adr);
1867 chip->state = FL_UNLOCKING;
1868 } else
1869 BUG();
1870
1871 /*
1872 * If Instant Individual Block Locking supported then no need
1873 * to delay.
1874 */
1875 udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1876
1877 ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1878 if (ret) {
1879 map_write(map, CMD(0x70), adr);
1880 chip->state = FL_STATUS;
1881 xip_enable(map, chip, adr);
1882 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1883 goto out;
1884 }
1885
1886 xip_enable(map, chip, adr);
1887 out: put_chip(map, chip, adr);
1888 spin_unlock(chip->mutex);
1889 return ret;
1890}
1891
1892static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1893{
1894 int ret;
1895
1896#ifdef DEBUG_LOCK_BITS
1897 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1898 __FUNCTION__, ofs, len);
1899 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1900 ofs, len, 0);
1901#endif
1902
1903 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1904 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1905
1906#ifdef DEBUG_LOCK_BITS
1907 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1908 __FUNCTION__, ret);
1909 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1910 ofs, len, 0);
1911#endif
1912
1913 return ret;
1914}
1915
1916static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1917{
1918 int ret;
1919
1920#ifdef DEBUG_LOCK_BITS
1921 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1922 __FUNCTION__, ofs, len);
1923 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1924 ofs, len, 0);
1925#endif
1926
1927 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1928 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1929
1930#ifdef DEBUG_LOCK_BITS
1931 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1932 __FUNCTION__, ret);
1933 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1934 ofs, len, 0);
1935#endif
1936
1937 return ret;
1938}
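/*
 * Editor's note: Intel/Sharp parts commonly power up with their blocks
 * locked, so callers typically bracket an update with the two methods
 * above. A minimal sketch, assuming an mtd_info with lock/unlock/write
 * wired up by this driver; the helper name is invented.
 */
static int example_update_region(struct mtd_info *mtd, loff_t ofs,
				 size_t len, const u_char *buf)
{
	size_t retlen;
	int ret;

	ret = mtd->unlock(mtd, ofs, len);	/* -> cfi_intelext_unlock() */
	if (ret)
		return ret;
	ret = mtd->write(mtd, ofs, len, &retlen, buf);
	if (ret)
		return ret;
	return mtd->lock(mtd, ofs, len);	/* -> cfi_intelext_lock() */
}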
1939
1940#ifdef CONFIG_MTD_OTP
1941
1942 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1943 u_long data_offset, u_char *buf, u_int size,
1944 u_long prot_offset, u_int groupno, u_int groupsize);
1945
1946static int __xipram
1947do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1948 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1949{
1950 struct cfi_private *cfi = map->fldrv_priv;
1951 int ret;
1952
1953 spin_lock(chip->mutex);
1954 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1955 if (ret) {
1956 spin_unlock(chip->mutex);
1957 return ret;
1958 }
1959
1960 /* let's ensure we're not reading back cached data from array mode */
1961 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1962
1963 xip_disable(map, chip, chip->start);
1964 if (chip->state != FL_JEDEC_QUERY) {
1965 map_write(map, CMD(0x90), chip->start);
1966 chip->state = FL_JEDEC_QUERY;
1967 }
1968 map_copy_from(map, buf, chip->start + offset, size);
1969 xip_enable(map, chip, chip->start);
1970
1971 /* then ensure we don't keep OTP data in the cache */
1972 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1973
1974 put_chip(map, chip, chip->start);
1975 spin_unlock(chip->mutex);
1976 return 0;
1977}
1978
1979static int
1980do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
1981 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1982{
1983 int ret;
1984
1985 while (size) {
1986 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
1987 int gap = offset - bus_ofs;
1988 int n = min_t(int, size, map_bankwidth(map)-gap);
1989 map_word datum = map_word_ff(map);
1990
1991 datum = map_word_load_partial(map, datum, buf, gap, n);
1992 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
1993 if (ret)
1994 return ret;
1995
1996 offset += n;
1997 buf += n;
1998 size -= n;
1999 }
2000
2001 return 0;
2002}
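/*
 * Editor's note: a worked example of the partial-word merge above,
 * assuming a 4-byte bank width. To write 2 bytes at OTP offset 5:
 * bus_ofs = 5 & ~3 = 4, gap = 1, n = min(2, 4 - 1) = 2. datum starts
 * as ff ff ff ff; map_word_load_partial() overlays the two payload
 * bytes at byte offset 1, and do_write_oneword() programs the word in
 * FL_OTP_WRITE mode -- the 0xff filler bits leave neighbouring bytes
 * untouched, since programming can only clear bits.
 */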
2003
2004static int
2005do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2006 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2007{
2008 struct cfi_private *cfi = map->fldrv_priv;
2009 map_word datum;
2010
2011 /* make sure area matches group boundaries */
2012 if (size != grpsz)
2013 return -EXDEV;
2014
2015 datum = map_word_ff(map);
2016 datum = map_word_clr(map, datum, CMD(1 << grpno));
2017 return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2018}
2019
2020static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2021 size_t *retlen, u_char *buf,
2022 otp_op_t action, int user_regs)
2023{
2024 struct map_info *map = mtd->priv;
2025 struct cfi_private *cfi = map->fldrv_priv;
2026 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2027 struct flchip *chip;
2028 struct cfi_intelext_otpinfo *otp;
2029 u_long devsize, reg_prot_offset, data_offset;
2030 u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2031 u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2032 int ret;
2033
2034 *retlen = 0;
2035
2036 /* Check that we actually have some OTP registers */
2037 if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2038 return -ENODATA;
2039
2040 /* we need real chips here not virtual ones */
2041 devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2042 chip_step = devsize >> cfi->chipshift;
2043 chip_num = 0;
2044
2045 /* Some chips have OTP located in the _top_ partition only.
2046 For example: Intel 28F256L18T (T means top-parameter device) */
2047 if (cfi->mfr == MANUFACTURER_INTEL) {
2048 switch (cfi->id) {
2049 case 0x880b:
2050 case 0x880c:
2051 case 0x880d:
2052 chip_num = chip_step - 1;
2053 }
2054 }
2055
2056 for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2057 chip = &cfi->chips[chip_num];
2058 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2059
2060 /* first OTP region */
2061 field = 0;
2062 reg_prot_offset = extp->ProtRegAddr;
2063 reg_fact_groups = 1;
2064 reg_fact_size = 1 << extp->FactProtRegSize;
2065 reg_user_groups = 1;
2066 reg_user_size = 1 << extp->UserProtRegSize;
2067
2068 while (len > 0) {
2069 /* flash geometry fixup */
2070 data_offset = reg_prot_offset + 1;
2071 data_offset *= cfi->interleave * cfi->device_type;
2072 reg_prot_offset *= cfi->interleave * cfi->device_type;
2073 reg_fact_size *= cfi->interleave;
2074 reg_user_size *= cfi->interleave;
2075
2076 if (user_regs) {
2077 groups = reg_user_groups;
2078 groupsize = reg_user_size;
2079 /* skip over factory reg area */
2080 groupno = reg_fact_groups;
2081 data_offset += reg_fact_groups * reg_fact_size;
2082 } else {
2083 groups = reg_fact_groups;
2084 groupsize = reg_fact_size;
2085 groupno = 0;
2086 }
2087
2088 while (len > 0 && groups > 0) {
2089 if (!action) {
2090 /*
2091 * Special case: if action is NULL
2092 * we fill buf with otp_info records.
2093 */
2094 struct otp_info *otpinfo;
2095 map_word lockword;
2096 len -= sizeof(struct otp_info);
2097 if (len <= 0)
2098 return -ENOSPC;
2099 ret = do_otp_read(map, chip,
2100 reg_prot_offset,
2101 (u_char *)&lockword,
2102 map_bankwidth(map),
2103 0, 0, 0);
2104 if (ret)
2105 return ret;
2106 otpinfo = (struct otp_info *)buf;
2107 otpinfo->start = from;
2108 otpinfo->length = groupsize;
2109 otpinfo->locked =
2110 !map_word_bitsset(map, lockword,
2111 CMD(1 << groupno));
2112 from += groupsize;
2113 buf += sizeof(*otpinfo);
2114 *retlen += sizeof(*otpinfo);
2115 } else if (from >= groupsize) {
2116 from -= groupsize;
2117 data_offset += groupsize;
2118 } else {
2119 int size = groupsize;
2120 data_offset += from;
2121 size -= from;
2122 from = 0;
2123 if (size > len)
2124 size = len;
2125 ret = action(map, chip, data_offset,
2126 buf, size, reg_prot_offset,
2127 groupno, groupsize);
2128 if (ret < 0)
2129 return ret;
2130 buf += size;
2131 len -= size;
2132 *retlen += size;
2133 data_offset += size;
2134 }
2135 groupno++;
2136 groups--;
2137 }
2138
2139 /* next OTP region */
2140 if (++field == extp->NumProtectionFields)
2141 break;
2142 reg_prot_offset = otp->ProtRegAddr;
2143 reg_fact_groups = otp->FactGroups;
2144 reg_fact_size = 1 << otp->FactProtRegSize;
2145 reg_user_groups = otp->UserGroups;
2146 reg_user_size = 1 << otp->UserProtRegSize;
2147 otp++;
2148 }
2149 }
2150
2151 return 0;
2152}
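/*
 * Editor's note: a sketch of how the walker above is consumed through
 * the era's mtd_info methods -- first size the user OTP area, then read
 * a group. Names and the four-entry array are invented for the example.
 */
static int example_dump_user_otp(struct mtd_info *mtd, u_char *buf,
				 size_t buflen)
{
	struct otp_info info[4];
	size_t retlen;
	int n;

	/* returns the number of otp_info bytes filled in, or an error */
	n = mtd->get_user_prot_info(mtd, info, sizeof(info));
	if (n < (int)sizeof(info[0]))
		return n < 0 ? n : -ENODATA;

	/* read the first user group, bounded by the caller's buffer */
	return mtd->read_user_prot_reg(mtd, info[0].start,
				       min_t(size_t, buflen, info[0].length),
				       &retlen, buf);
}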
2153
2154static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2155 size_t len, size_t *retlen,
2156 u_char *buf)
2157{
2158 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2159 buf, do_otp_read, 0);
2160}
2161
2162static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2163 size_t len, size_t *retlen,
2164 u_char *buf)
2165{
2166 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2167 buf, do_otp_read, 1);
2168}
2169
2170static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2171 size_t len, size_t *retlen,
2172 u_char *buf)
2173{
2174 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2175 buf, do_otp_write, 1);
2176}
2177
2178static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2179 loff_t from, size_t len)
2180{
2181 size_t retlen;
2182 return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2183 NULL, do_otp_lock, 1);
2184}
2185
2186 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2187 struct otp_info *buf, size_t len)
2188{
2189 size_t retlen;
2190 int ret;
2191
2192 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2193 return ret ? : retlen;
2194}
2195
2196static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2197 struct otp_info *buf, size_t len)
2198{
2199 size_t retlen;
2200 int ret;
2201
2202 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2203 return ret ? : retlen;
2204}
2205
2206#endif
2207
2208static int cfi_intelext_suspend(struct mtd_info *mtd)
2209{
2210 struct map_info *map = mtd->priv;
2211 struct cfi_private *cfi = map->fldrv_priv;
2212 int i;
2213 struct flchip *chip;
2214 int ret = 0;
2215
2216 for (i=0; !ret && i<cfi->numchips; i++) {
2217 chip = &cfi->chips[i];
2218
2219 spin_lock(chip->mutex);
2220
2221 switch (chip->state) {
2222 case FL_READY:
2223 case FL_STATUS:
2224 case FL_CFI_QUERY:
2225 case FL_JEDEC_QUERY:
2226 if (chip->oldstate == FL_READY) {
2227 chip->oldstate = chip->state;
2228 chip->state = FL_PM_SUSPENDED;
2229 /* No need to wake_up() on this state change -
2230 * as the whole point is that nobody can do anything
2231 * with the chip now anyway.
2232 */
2233 } else {
2234 /* There seems to be an operation pending. We must wait for it. */
2235 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2236 ret = -EAGAIN;
2237 }
2238 break;
2239 default:
2240 /* Should we actually wait? Once upon a time these routines weren't
2241 allowed to. Or should we return -EAGAIN, because the upper layers
2242 ought to have already shut down anything which was using the device
2243 anyway? The latter for now. */
2244 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2245 ret = -EAGAIN;
2246 case FL_PM_SUSPENDED:
2247 break;
2248 }
2249 spin_unlock(chip->mutex);
2250 }
2251
2252 /* Unlock the chips again */
2253
2254 if (ret) {
2255 for (i--; i >=0; i--) {
2256 chip = &cfi->chips[i];
2257
2258 spin_lock(chip->mutex);
2259
2260 if (chip->state == FL_PM_SUSPENDED) {
2261 /* No need to force it into a known state here,
2262 because we're returning failure, and it didn't
2263 get power cycled */
2264 chip->state = chip->oldstate;
2265 chip->oldstate = FL_READY;
2266 wake_up(&chip->wq);
2267 }
2268 spin_unlock(chip->mutex);
2269 }
2270 }
2271
2272 return ret;
2273}
2274
2275static void cfi_intelext_resume(struct mtd_info *mtd)
2276{
2277 struct map_info *map = mtd->priv;
2278 struct cfi_private *cfi = map->fldrv_priv;
2279 int i;
2280 struct flchip *chip;
2281
2282 for (i=0; i<cfi->numchips; i++) {
2283
2284 chip = &cfi->chips[i];
2285
2286 spin_lock(chip->mutex);
2287
2288 /* Go to known state. Chip may have been power cycled */
2289 if (chip->state == FL_PM_SUSPENDED) {
2290 map_write(map, CMD(0xFF), cfi->chips[i].start);
2291 chip->oldstate = chip->state = FL_READY;
2292 wake_up(&chip->wq);
2293 }
2294
2295 spin_unlock(chip->mutex);
2296 }
2297}
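/*
 * Editor's note: these two handlers are installed on the mtd_info by the
 * setup code earlier in this file (outside this excerpt), essentially
 * mtd->suspend = cfi_intelext_suspend and mtd->resume =
 * cfi_intelext_resume, so the MTD core can quiesce the chip array across
 * a power transition. Resume simply forces any still-suspended chip back
 * to array mode with 0xFF, since it may have been power cycled.
 */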
2298
2299static int cfi_intelext_reset(struct mtd_info *mtd)
2300{
2301 struct map_info *map = mtd->priv;
2302 struct cfi_private *cfi = map->fldrv_priv;
2303 int i, ret;
2304
2305 for (i=0; i < cfi->numchips; i++) {
2306 struct flchip *chip = &cfi->chips[i];
2307
2308 /* force the completion of any ongoing operation
2309 and switch to array mode so any bootloader in
2310 flash is accessible for soft reboot. */
2311 spin_lock(chip->mutex);
2312 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2313 if (!ret) {
2314 map_write(map, CMD(0xff), chip->start);
2315 chip->state = FL_READY;
2316 }
2317 spin_unlock(chip->mutex);
2318 }
2319
2320 return 0;
2321}
2322
2323static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2324 void *v)
2325{
2326 struct mtd_info *mtd;
2327
2328 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2329 cfi_intelext_reset(mtd);
2330 return NOTIFY_DONE;
2331}
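/*
 * Editor's note: a sketch of how this notifier gets attached. The real
 * registration happens in cfi_intelext_setup() earlier in the driver;
 * the helper below only illustrates the pairing with the
 * unregister_reboot_notifier() call in cfi_intelext_destroy() just
 * after this.
 */
static void example_register_reboot_hook(struct mtd_info *mtd)
{
	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
	register_reboot_notifier(&mtd->reboot_notifier);
}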
2332
2333static void cfi_intelext_destroy(struct mtd_info *mtd)
2334{
2335 struct map_info *map = mtd->priv;
2336 struct cfi_private *cfi = map->fldrv_priv;
2337 cfi_intelext_reset(mtd);
2338 unregister_reboot_notifier(&mtd->reboot_notifier);
2339 kfree(cfi->cmdset_priv);
2340 kfree(cfi->cfiq);
2341 kfree(cfi->chips[0].priv);
2342 kfree(cfi);
2343 kfree(mtd->eraseregions);
2344}
2345
2346MODULE_LICENSE("GPL");
2347MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2348MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2349MODULE_ALIAS("cfi_cmdset_0003");
2350MODULE_ALIAS("cfi_cmdset_0200");