2 * NAND Flash Controller Device Driver
3 * Copyright © 2009-2010, Intel Corporation and its suppliers.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 #include <linux/interrupt.h>
21 #include <linux/delay.h>
22 #include <linux/wait.h>
23 #include <linux/mutex.h>
24 #include <linux/pci.h>
25 #include <linux/mtd/mtd.h>
26 #include <linux/module.h>
30 MODULE_LICENSE("GPL");
32 /* We define a module parameter that allows the user to override
33 * the hardware and decide what timing mode should be used.
35 #define NAND_DEFAULT_TIMINGS -1
37 static int onfi_timing_mode
= NAND_DEFAULT_TIMINGS
;
38 module_param(onfi_timing_mode
, int, S_IRUGO
);
39 MODULE_PARM_DESC(onfi_timing_mode
, "Overrides default ONFI setting. -1 indicates"
40 " use default timings");
#define DENALI_NAND_NAME    "denali-nand"

/* We define a macro here that combines all interrupts this driver uses into
 * a single constant value, for convenience. */
#define DENALI_IRQ_ALL	(INTR_STATUS0__DMA_CMD_COMP | \
			INTR_STATUS0__ECC_TRANSACTION_DONE | \
			INTR_STATUS0__ECC_ERR | \
			INTR_STATUS0__PROGRAM_FAIL | \
			INTR_STATUS0__LOAD_COMP | \
			INTR_STATUS0__PROGRAM_COMP | \
			INTR_STATUS0__TIME_OUT | \
			INTR_STATUS0__ERASE_FAIL | \
			INTR_STATUS0__RST_COMP | \
			INTR_STATUS0__ERASE_COMP)

/* indicates whether or not the internal value for the flash bank is
 * valid or not */
#define CHIP_SELECT_INVALID	-1

#define SUPPORT_8BITECC		1

/* This macro divides two integers and rounds fractional values up
 * to the nearest integer value. (X and Y are evaluated more than
 * once — do not pass expressions with side effects.) */
#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))

/* this macro allows us to convert from an MTD structure to our own
 * device context (denali) structure. */
#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)

/* These constants are defined by the driver to enable common driver
   configuration options. */
#define SPARE_ACCESS		0x41
#define MAIN_ACCESS		0x42
#define MAIN_SPARE_ACCESS	0x43

#define DENALI_WRITE	0x100

/* types of device accesses. We can issue commands and get status */
#define COMMAND_CYCLE	0
#define STATUS_CYCLE	2

/* this is a helper macro that allows us to
 * format the bank into the proper bits for the controller */
#define BANK(x) ((x) << 24)
90 /* List of platforms this NAND controller has be integrated into */
91 static const struct pci_device_id denali_pci_ids
[] = {
92 { PCI_VDEVICE(INTEL
, 0x0701), INTEL_CE4100
},
93 { PCI_VDEVICE(INTEL
, 0x0809), INTEL_MRST
},
94 { /* end: all zeroes */ }
98 /* these are static lookup tables that give us easy access to
99 registers in the NAND controller.
101 static const uint32_t intr_status_addresses
[4] = {INTR_STATUS0
,
106 static const uint32_t device_reset_banks
[4] = {DEVICE_RESET__BANK0
,
109 DEVICE_RESET__BANK3
};
111 static const uint32_t operation_timeout
[4] = {INTR_STATUS0__TIME_OUT
,
112 INTR_STATUS1__TIME_OUT
,
113 INTR_STATUS2__TIME_OUT
,
114 INTR_STATUS3__TIME_OUT
};
116 static const uint32_t reset_complete
[4] = {INTR_STATUS0__RST_COMP
,
117 INTR_STATUS1__RST_COMP
,
118 INTR_STATUS2__RST_COMP
,
119 INTR_STATUS3__RST_COMP
};
/* specifies the debug level of the driver; statics are zero-initialized,
 * so no explicit "= 0" is needed (checkpatch flags it) */
static int nand_debug_level;
124 /* forward declarations */
125 static void clear_interrupts(struct denali_nand_info
*denali
);
126 static uint32_t wait_for_irq(struct denali_nand_info
*denali
, uint32_t irq_mask
);
127 static void denali_irq_enable(struct denali_nand_info
*denali
, uint32_t int_mask
);
128 static uint32_t read_interrupt_status(struct denali_nand_info
*denali
);
130 #define DEBUG_DENALI 0
132 /* This is a wrapper for writing to the denali registers.
133 * this allows us to create debug information so we can
134 * observe how the driver is programming the device.
135 * it uses standard linux convention for (val, addr) */
136 static void denali_write32(uint32_t value
, void *addr
)
138 iowrite32(value
, addr
);
141 printk(KERN_ERR
"wrote: 0x%x -> 0x%x\n", value
, (uint32_t)((uint32_t)addr
& 0x1fff));
145 /* Certain operations for the denali NAND controller use an indexed mode to read/write
146 data. The operation is performed by writing the address value of the command to
147 the device memory followed by the data. This function abstracts this common
150 static void index_addr(struct denali_nand_info
*denali
, uint32_t address
, uint32_t data
)
152 denali_write32(address
, denali
->flash_mem
);
153 denali_write32(data
, denali
->flash_mem
+ 0x10);
156 /* Perform an indexed read of the device */
157 static void index_addr_read_data(struct denali_nand_info
*denali
,
158 uint32_t address
, uint32_t *pdata
)
160 denali_write32(address
, denali
->flash_mem
);
161 *pdata
= ioread32(denali
->flash_mem
+ 0x10);
164 /* We need to buffer some data for some of the NAND core routines.
165 * The operations manage buffering that data. */
166 static void reset_buf(struct denali_nand_info
*denali
)
168 denali
->buf
.head
= denali
->buf
.tail
= 0;
171 static void write_byte_to_buf(struct denali_nand_info
*denali
, uint8_t byte
)
173 BUG_ON(denali
->buf
.tail
>= sizeof(denali
->buf
.buf
));
174 denali
->buf
.buf
[denali
->buf
.tail
++] = byte
;
177 /* reads the status of the device */
178 static void read_status(struct denali_nand_info
*denali
)
182 /* initialize the data buffer to store status */
185 /* initiate a device status read */
186 cmd
= MODE_11
| BANK(denali
->flash_bank
);
187 index_addr(denali
, cmd
| COMMAND_CYCLE
, 0x70);
188 denali_write32(cmd
| STATUS_CYCLE
, denali
->flash_mem
);
190 /* update buffer with status value */
191 write_byte_to_buf(denali
, ioread32(denali
->flash_mem
+ 0x10));
194 printk("device reporting status value of 0x%2x\n", denali
->buf
.buf
[0]);
198 /* resets a specific device connected to the core */
199 static void reset_bank(struct denali_nand_info
*denali
)
201 uint32_t irq_status
= 0;
202 uint32_t irq_mask
= reset_complete
[denali
->flash_bank
] |
203 operation_timeout
[denali
->flash_bank
];
206 clear_interrupts(denali
);
208 bank
= device_reset_banks
[denali
->flash_bank
];
209 denali_write32(bank
, denali
->flash_reg
+ DEVICE_RESET
);
211 irq_status
= wait_for_irq(denali
, irq_mask
);
213 if (irq_status
& operation_timeout
[denali
->flash_bank
])
215 printk(KERN_ERR
"reset bank failed.\n");
219 /* Reset the flash controller */
220 static uint16_t NAND_Flash_Reset(struct denali_nand_info
*denali
)
224 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
225 __FILE__
, __LINE__
, __func__
);
227 for (i
= 0 ; i
< LLD_MAX_FLASH_BANKS
; i
++)
228 denali_write32(reset_complete
[i
] | operation_timeout
[i
],
229 denali
->flash_reg
+ intr_status_addresses
[i
]);
231 for (i
= 0 ; i
< LLD_MAX_FLASH_BANKS
; i
++) {
232 denali_write32(device_reset_banks
[i
], denali
->flash_reg
+ DEVICE_RESET
);
233 while (!(ioread32(denali
->flash_reg
+ intr_status_addresses
[i
]) &
234 (reset_complete
[i
] | operation_timeout
[i
])))
236 if (ioread32(denali
->flash_reg
+ intr_status_addresses
[i
]) &
237 operation_timeout
[i
])
238 nand_dbg_print(NAND_DBG_WARN
,
239 "NAND Reset operation timed out on bank %d\n", i
);
242 for (i
= 0; i
< LLD_MAX_FLASH_BANKS
; i
++)
243 denali_write32(reset_complete
[i
] | operation_timeout
[i
],
244 denali
->flash_reg
+ intr_status_addresses
[i
]);
249 /* this routine calculates the ONFI timing values for a given mode and programs
250 * the clocking register accordingly. The mode is determined by the get_onfi_nand_para
253 static void NAND_ONFi_Timing_Mode(struct denali_nand_info
*denali
, uint16_t mode
)
255 uint16_t Trea
[6] = {40, 30, 25, 20, 20, 16};
256 uint16_t Trp
[6] = {50, 25, 17, 15, 12, 10};
257 uint16_t Treh
[6] = {30, 15, 15, 10, 10, 7};
258 uint16_t Trc
[6] = {100, 50, 35, 30, 25, 20};
259 uint16_t Trhoh
[6] = {0, 15, 15, 15, 15, 15};
260 uint16_t Trloh
[6] = {0, 0, 0, 0, 5, 5};
261 uint16_t Tcea
[6] = {100, 45, 30, 25, 25, 25};
262 uint16_t Tadl
[6] = {200, 100, 100, 100, 70, 70};
263 uint16_t Trhw
[6] = {200, 100, 100, 100, 100, 100};
264 uint16_t Trhz
[6] = {200, 100, 100, 100, 100, 100};
265 uint16_t Twhr
[6] = {120, 80, 80, 60, 60, 60};
266 uint16_t Tcs
[6] = {70, 35, 25, 25, 20, 15};
268 uint16_t TclsRising
= 1;
269 uint16_t data_invalid_rhoh
, data_invalid_rloh
, data_invalid
;
270 uint16_t dv_window
= 0;
271 uint16_t en_lo
, en_hi
;
273 uint16_t addr_2_data
, re_2_we
, re_2_re
, we_2_re
, cs_cnt
;
275 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
276 __FILE__
, __LINE__
, __func__
);
278 en_lo
= CEIL_DIV(Trp
[mode
], CLK_X
);
279 en_hi
= CEIL_DIV(Treh
[mode
], CLK_X
);
281 if ((en_hi
* CLK_X
) < (Treh
[mode
] + 2))
285 if ((en_lo
+ en_hi
) * CLK_X
< Trc
[mode
])
286 en_lo
+= CEIL_DIV((Trc
[mode
] - (en_lo
+ en_hi
) * CLK_X
), CLK_X
);
288 if ((en_lo
+ en_hi
) < CLK_MULTI
)
289 en_lo
+= CLK_MULTI
- en_lo
- en_hi
;
291 while (dv_window
< 8) {
292 data_invalid_rhoh
= en_lo
* CLK_X
+ Trhoh
[mode
];
294 data_invalid_rloh
= (en_lo
+ en_hi
) * CLK_X
+ Trloh
[mode
];
298 data_invalid_rloh
? data_invalid_rhoh
: data_invalid_rloh
;
300 dv_window
= data_invalid
- Trea
[mode
];
306 acc_clks
= CEIL_DIV(Trea
[mode
], CLK_X
);
308 while (((acc_clks
* CLK_X
) - Trea
[mode
]) < 3)
311 if ((data_invalid
- acc_clks
* CLK_X
) < 2)
312 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d: Warning!\n",
315 addr_2_data
= CEIL_DIV(Tadl
[mode
], CLK_X
);
316 re_2_we
= CEIL_DIV(Trhw
[mode
], CLK_X
);
317 re_2_re
= CEIL_DIV(Trhz
[mode
], CLK_X
);
318 we_2_re
= CEIL_DIV(Twhr
[mode
], CLK_X
);
319 cs_cnt
= CEIL_DIV((Tcs
[mode
] - Trp
[mode
]), CLK_X
);
321 cs_cnt
= CEIL_DIV(Tcs
[mode
], CLK_X
);
326 while (((cs_cnt
* CLK_X
) + Trea
[mode
]) < Tcea
[mode
])
335 /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
336 if ((ioread32(denali
->flash_reg
+ MANUFACTURER_ID
) == 0) &&
337 (ioread32(denali
->flash_reg
+ DEVICE_ID
) == 0x88))
340 denali_write32(acc_clks
, denali
->flash_reg
+ ACC_CLKS
);
341 denali_write32(re_2_we
, denali
->flash_reg
+ RE_2_WE
);
342 denali_write32(re_2_re
, denali
->flash_reg
+ RE_2_RE
);
343 denali_write32(we_2_re
, denali
->flash_reg
+ WE_2_RE
);
344 denali_write32(addr_2_data
, denali
->flash_reg
+ ADDR_2_DATA
);
345 denali_write32(en_lo
, denali
->flash_reg
+ RDWR_EN_LO_CNT
);
346 denali_write32(en_hi
, denali
->flash_reg
+ RDWR_EN_HI_CNT
);
347 denali_write32(cs_cnt
, denali
->flash_reg
+ CS_SETUP_CNT
);
350 /* configures the initial ECC settings for the controller */
351 static void set_ecc_config(struct denali_nand_info
*denali
)
354 if ((ioread32(denali
->flash_reg
+ DEVICE_MAIN_AREA_SIZE
) < 4096) ||
355 (ioread32(denali
->flash_reg
+ DEVICE_SPARE_AREA_SIZE
) <= 128))
356 denali_write32(8, denali
->flash_reg
+ ECC_CORRECTION
);
359 if ((ioread32(denali
->flash_reg
+ ECC_CORRECTION
) & ECC_CORRECTION__VALUE
)
361 denali
->dev_info
.wECCBytesPerSector
= 4;
362 denali
->dev_info
.wECCBytesPerSector
*= denali
->dev_info
.wDevicesConnected
;
363 denali
->dev_info
.wNumPageSpareFlag
=
364 denali
->dev_info
.wPageSpareSize
-
365 denali
->dev_info
.wPageDataSize
/
366 (ECC_SECTOR_SIZE
* denali
->dev_info
.wDevicesConnected
) *
367 denali
->dev_info
.wECCBytesPerSector
368 - denali
->dev_info
.wSpareSkipBytes
;
370 denali
->dev_info
.wECCBytesPerSector
=
371 (ioread32(denali
->flash_reg
+ ECC_CORRECTION
) &
372 ECC_CORRECTION__VALUE
) * 13 / 8;
373 if ((denali
->dev_info
.wECCBytesPerSector
) % 2 == 0)
374 denali
->dev_info
.wECCBytesPerSector
+= 2;
376 denali
->dev_info
.wECCBytesPerSector
+= 1;
378 denali
->dev_info
.wECCBytesPerSector
*= denali
->dev_info
.wDevicesConnected
;
379 denali
->dev_info
.wNumPageSpareFlag
= denali
->dev_info
.wPageSpareSize
-
380 denali
->dev_info
.wPageDataSize
/
381 (ECC_SECTOR_SIZE
* denali
->dev_info
.wDevicesConnected
) *
382 denali
->dev_info
.wECCBytesPerSector
383 - denali
->dev_info
.wSpareSkipBytes
;
387 /* queries the NAND device to see what ONFI modes it supports. */
388 static uint16_t get_onfi_nand_para(struct denali_nand_info
*denali
)
391 uint16_t blks_lun_l
, blks_lun_h
, n_of_luns
;
392 uint32_t blockperlun
, id
;
394 denali_write32(DEVICE_RESET__BANK0
, denali
->flash_reg
+ DEVICE_RESET
);
396 while (!((ioread32(denali
->flash_reg
+ INTR_STATUS0
) &
397 INTR_STATUS0__RST_COMP
) |
398 (ioread32(denali
->flash_reg
+ INTR_STATUS0
) &
399 INTR_STATUS0__TIME_OUT
)))
402 if (ioread32(denali
->flash_reg
+ INTR_STATUS0
) & INTR_STATUS0__RST_COMP
) {
403 denali_write32(DEVICE_RESET__BANK1
, denali
->flash_reg
+ DEVICE_RESET
);
404 while (!((ioread32(denali
->flash_reg
+ INTR_STATUS1
) &
405 INTR_STATUS1__RST_COMP
) |
406 (ioread32(denali
->flash_reg
+ INTR_STATUS1
) &
407 INTR_STATUS1__TIME_OUT
)))
410 if (ioread32(denali
->flash_reg
+ INTR_STATUS1
) &
411 INTR_STATUS1__RST_COMP
) {
412 denali_write32(DEVICE_RESET__BANK2
,
413 denali
->flash_reg
+ DEVICE_RESET
);
414 while (!((ioread32(denali
->flash_reg
+ INTR_STATUS2
) &
415 INTR_STATUS2__RST_COMP
) |
416 (ioread32(denali
->flash_reg
+ INTR_STATUS2
) &
417 INTR_STATUS2__TIME_OUT
)))
420 if (ioread32(denali
->flash_reg
+ INTR_STATUS2
) &
421 INTR_STATUS2__RST_COMP
) {
422 denali_write32(DEVICE_RESET__BANK3
,
423 denali
->flash_reg
+ DEVICE_RESET
);
424 while (!((ioread32(denali
->flash_reg
+ INTR_STATUS3
) &
425 INTR_STATUS3__RST_COMP
) |
426 (ioread32(denali
->flash_reg
+ INTR_STATUS3
) &
427 INTR_STATUS3__TIME_OUT
)))
430 printk(KERN_ERR
"Getting a time out for bank 2!\n");
433 printk(KERN_ERR
"Getting a time out for bank 1!\n");
437 denali_write32(INTR_STATUS0__TIME_OUT
, denali
->flash_reg
+ INTR_STATUS0
);
438 denali_write32(INTR_STATUS1__TIME_OUT
, denali
->flash_reg
+ INTR_STATUS1
);
439 denali_write32(INTR_STATUS2__TIME_OUT
, denali
->flash_reg
+ INTR_STATUS2
);
440 denali_write32(INTR_STATUS3__TIME_OUT
, denali
->flash_reg
+ INTR_STATUS3
);
442 denali
->dev_info
.wONFIDevFeatures
=
443 ioread32(denali
->flash_reg
+ ONFI_DEVICE_FEATURES
);
444 denali
->dev_info
.wONFIOptCommands
=
445 ioread32(denali
->flash_reg
+ ONFI_OPTIONAL_COMMANDS
);
446 denali
->dev_info
.wONFITimingMode
=
447 ioread32(denali
->flash_reg
+ ONFI_TIMING_MODE
);
448 denali
->dev_info
.wONFIPgmCacheTimingMode
=
449 ioread32(denali
->flash_reg
+ ONFI_PGM_CACHE_TIMING_MODE
);
451 n_of_luns
= ioread32(denali
->flash_reg
+ ONFI_DEVICE_NO_OF_LUNS
) &
452 ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS
;
453 blks_lun_l
= ioread32(denali
->flash_reg
+ ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L
);
454 blks_lun_h
= ioread32(denali
->flash_reg
+ ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U
);
456 blockperlun
= (blks_lun_h
<< 16) | blks_lun_l
;
458 denali
->dev_info
.wTotalBlocks
= n_of_luns
* blockperlun
;
460 if (!(ioread32(denali
->flash_reg
+ ONFI_TIMING_MODE
) &
461 ONFI_TIMING_MODE__VALUE
))
464 for (i
= 5; i
> 0; i
--) {
465 if (ioread32(denali
->flash_reg
+ ONFI_TIMING_MODE
) & (0x01 << i
))
469 NAND_ONFi_Timing_Mode(denali
, i
);
471 index_addr(denali
, MODE_11
| 0, 0x90);
472 index_addr(denali
, MODE_11
| 1, 0);
474 for (i
= 0; i
< 3; i
++)
475 index_addr_read_data(denali
, MODE_11
| 2, &id
);
477 nand_dbg_print(NAND_DBG_DEBUG
, "3rd ID: 0x%x\n", id
);
479 denali
->dev_info
.MLCDevice
= id
& 0x0C;
481 /* By now, all the ONFI devices we know support the page cache */
482 /* rw feature. So here we enable the pipeline_rw_ahead feature */
483 /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
484 /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */
489 static void get_samsung_nand_para(struct denali_nand_info
*denali
)
491 uint8_t no_of_planes
;
493 uint64_t plane_size
, capacity
;
494 uint32_t id_bytes
[5];
497 index_addr(denali
, (uint32_t)(MODE_11
| 0), 0x90);
498 index_addr(denali
, (uint32_t)(MODE_11
| 1), 0);
499 for (i
= 0; i
< 5; i
++)
500 index_addr_read_data(denali
, (uint32_t)(MODE_11
| 2), &id_bytes
[i
]);
502 nand_dbg_print(NAND_DBG_DEBUG
,
503 "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
504 id_bytes
[0], id_bytes
[1], id_bytes
[2],
505 id_bytes
[3], id_bytes
[4]);
507 if ((id_bytes
[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
508 /* Set timing register values according to datasheet */
509 denali_write32(5, denali
->flash_reg
+ ACC_CLKS
);
510 denali_write32(20, denali
->flash_reg
+ RE_2_WE
);
511 denali_write32(12, denali
->flash_reg
+ WE_2_RE
);
512 denali_write32(14, denali
->flash_reg
+ ADDR_2_DATA
);
513 denali_write32(3, denali
->flash_reg
+ RDWR_EN_LO_CNT
);
514 denali_write32(2, denali
->flash_reg
+ RDWR_EN_HI_CNT
);
515 denali_write32(2, denali
->flash_reg
+ CS_SETUP_CNT
);
518 no_of_planes
= 1 << ((id_bytes
[4] & 0x0c) >> 2);
519 plane_size
= (uint64_t)64 << ((id_bytes
[4] & 0x70) >> 4);
520 blk_size
= 64 << ((ioread32(denali
->flash_reg
+ DEVICE_PARAM_1
) & 0x30) >> 4);
521 capacity
= (uint64_t)128 * plane_size
* no_of_planes
;
523 do_div(capacity
, blk_size
);
524 denali
->dev_info
.wTotalBlocks
= capacity
;
527 static void get_toshiba_nand_para(struct denali_nand_info
*denali
)
529 void __iomem
*scratch_reg
;
532 /* Workaround to fix a controller bug which reports a wrong */
533 /* spare area size for some kind of Toshiba NAND device */
534 if ((ioread32(denali
->flash_reg
+ DEVICE_MAIN_AREA_SIZE
) == 4096) &&
535 (ioread32(denali
->flash_reg
+ DEVICE_SPARE_AREA_SIZE
) == 64)) {
536 denali_write32(216, denali
->flash_reg
+ DEVICE_SPARE_AREA_SIZE
);
537 tmp
= ioread32(denali
->flash_reg
+ DEVICES_CONNECTED
) *
538 ioread32(denali
->flash_reg
+ DEVICE_SPARE_AREA_SIZE
);
539 denali_write32(tmp
, denali
->flash_reg
+ LOGICAL_PAGE_SPARE_SIZE
);
541 denali_write32(15, denali
->flash_reg
+ ECC_CORRECTION
);
542 #elif SUPPORT_8BITECC
543 denali_write32(8, denali
->flash_reg
+ ECC_CORRECTION
);
547 /* As Toshiba NAND can not provide it's block number, */
548 /* so here we need user to provide the correct block */
549 /* number in a scratch register before the Linux NAND */
550 /* driver is loaded. If no valid value found in the scratch */
551 /* register, then we use default block number value */
552 scratch_reg
= ioremap_nocache(SCRATCH_REG_ADDR
, SCRATCH_REG_SIZE
);
554 printk(KERN_ERR
"Spectra: ioremap failed in %s, Line %d",
556 denali
->dev_info
.wTotalBlocks
= GLOB_HWCTL_DEFAULT_BLKS
;
558 nand_dbg_print(NAND_DBG_WARN
,
559 "Spectra: ioremap reg address: 0x%p\n", scratch_reg
);
560 denali
->dev_info
.wTotalBlocks
= 1 << ioread8(scratch_reg
);
561 if (denali
->dev_info
.wTotalBlocks
< 512)
562 denali
->dev_info
.wTotalBlocks
= GLOB_HWCTL_DEFAULT_BLKS
;
563 iounmap(scratch_reg
);
567 static void get_hynix_nand_para(struct denali_nand_info
*denali
)
569 void __iomem
*scratch_reg
;
570 uint32_t main_size
, spare_size
;
572 switch (denali
->dev_info
.wDeviceID
) {
573 case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
574 case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
575 denali_write32(128, denali
->flash_reg
+ PAGES_PER_BLOCK
);
576 denali_write32(4096, denali
->flash_reg
+ DEVICE_MAIN_AREA_SIZE
);
577 denali_write32(224, denali
->flash_reg
+ DEVICE_SPARE_AREA_SIZE
);
578 main_size
= 4096 * ioread32(denali
->flash_reg
+ DEVICES_CONNECTED
);
579 spare_size
= 224 * ioread32(denali
->flash_reg
+ DEVICES_CONNECTED
);
580 denali_write32(main_size
, denali
->flash_reg
+ LOGICAL_PAGE_DATA_SIZE
);
581 denali_write32(spare_size
, denali
->flash_reg
+ LOGICAL_PAGE_SPARE_SIZE
);
582 denali_write32(0, denali
->flash_reg
+ DEVICE_WIDTH
);
584 denali_write32(15, denali
->flash_reg
+ ECC_CORRECTION
);
585 #elif SUPPORT_8BITECC
586 denali_write32(8, denali
->flash_reg
+ ECC_CORRECTION
);
588 denali
->dev_info
.MLCDevice
= 1;
591 nand_dbg_print(NAND_DBG_WARN
,
592 "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
593 "Will use default parameter values instead.\n",
594 denali
->dev_info
.wDeviceID
);
597 scratch_reg
= ioremap_nocache(SCRATCH_REG_ADDR
, SCRATCH_REG_SIZE
);
599 printk(KERN_ERR
"Spectra: ioremap failed in %s, Line %d",
601 denali
->dev_info
.wTotalBlocks
= GLOB_HWCTL_DEFAULT_BLKS
;
603 nand_dbg_print(NAND_DBG_WARN
,
604 "Spectra: ioremap reg address: 0x%p\n", scratch_reg
);
605 denali
->dev_info
.wTotalBlocks
= 1 << ioread8(scratch_reg
);
606 if (denali
->dev_info
.wTotalBlocks
< 512)
607 denali
->dev_info
.wTotalBlocks
= GLOB_HWCTL_DEFAULT_BLKS
;
608 iounmap(scratch_reg
);
612 /* determines how many NAND chips are connected to the controller. Note for
613 Intel CE4100 devices we don't support more than one device.
615 static void find_valid_banks(struct denali_nand_info
*denali
)
617 uint32_t id
[LLD_MAX_FLASH_BANKS
];
620 denali
->total_used_banks
= 1;
621 for (i
= 0; i
< LLD_MAX_FLASH_BANKS
; i
++) {
622 index_addr(denali
, (uint32_t)(MODE_11
| (i
<< 24) | 0), 0x90);
623 index_addr(denali
, (uint32_t)(MODE_11
| (i
<< 24) | 1), 0);
624 index_addr_read_data(denali
, (uint32_t)(MODE_11
| (i
<< 24) | 2), &id
[i
]);
626 nand_dbg_print(NAND_DBG_DEBUG
,
627 "Return 1st ID for bank[%d]: %x\n", i
, id
[i
]);
630 if (!(id
[i
] & 0x0ff))
633 if ((id
[i
] & 0x0ff) == (id
[0] & 0x0ff))
634 denali
->total_used_banks
++;
640 if (denali
->platform
== INTEL_CE4100
)
642 /* Platform limitations of the CE4100 device limit
643 * users to a single chip solution for NAND.
644 * Multichip support is not enabled.
646 if (denali
->total_used_banks
!= 1)
648 printk(KERN_ERR
"Sorry, Intel CE4100 only supports "
649 "a single NAND device.\n");
653 nand_dbg_print(NAND_DBG_DEBUG
,
654 "denali->total_used_banks: %d\n", denali
->total_used_banks
);
657 static void detect_partition_feature(struct denali_nand_info
*denali
)
659 if (ioread32(denali
->flash_reg
+ FEATURES
) & FEATURES__PARTITION
) {
660 if ((ioread32(denali
->flash_reg
+ PERM_SRC_ID_1
) &
661 PERM_SRC_ID_1__SRCID
) == SPECTRA_PARTITION_ID
) {
662 denali
->dev_info
.wSpectraStartBlock
=
663 ((ioread32(denali
->flash_reg
+ MIN_MAX_BANK_1
) &
664 MIN_MAX_BANK_1__MIN_VALUE
) *
665 denali
->dev_info
.wTotalBlocks
)
667 (ioread32(denali
->flash_reg
+ MIN_BLK_ADDR_1
) &
668 MIN_BLK_ADDR_1__VALUE
);
670 denali
->dev_info
.wSpectraEndBlock
=
671 (((ioread32(denali
->flash_reg
+ MIN_MAX_BANK_1
) &
672 MIN_MAX_BANK_1__MAX_VALUE
) >> 2) *
673 denali
->dev_info
.wTotalBlocks
)
675 (ioread32(denali
->flash_reg
+ MAX_BLK_ADDR_1
) &
676 MAX_BLK_ADDR_1__VALUE
);
678 denali
->dev_info
.wTotalBlocks
*= denali
->total_used_banks
;
680 if (denali
->dev_info
.wSpectraEndBlock
>=
681 denali
->dev_info
.wTotalBlocks
) {
682 denali
->dev_info
.wSpectraEndBlock
=
683 denali
->dev_info
.wTotalBlocks
- 1;
686 denali
->dev_info
.wDataBlockNum
=
687 denali
->dev_info
.wSpectraEndBlock
-
688 denali
->dev_info
.wSpectraStartBlock
+ 1;
690 denali
->dev_info
.wTotalBlocks
*= denali
->total_used_banks
;
691 denali
->dev_info
.wSpectraStartBlock
= SPECTRA_START_BLOCK
;
692 denali
->dev_info
.wSpectraEndBlock
=
693 denali
->dev_info
.wTotalBlocks
- 1;
694 denali
->dev_info
.wDataBlockNum
=
695 denali
->dev_info
.wSpectraEndBlock
-
696 denali
->dev_info
.wSpectraStartBlock
+ 1;
699 denali
->dev_info
.wTotalBlocks
*= denali
->total_used_banks
;
700 denali
->dev_info
.wSpectraStartBlock
= SPECTRA_START_BLOCK
;
701 denali
->dev_info
.wSpectraEndBlock
= denali
->dev_info
.wTotalBlocks
- 1;
702 denali
->dev_info
.wDataBlockNum
=
703 denali
->dev_info
.wSpectraEndBlock
-
704 denali
->dev_info
.wSpectraStartBlock
+ 1;
708 static void dump_device_info(struct denali_nand_info
*denali
)
710 nand_dbg_print(NAND_DBG_DEBUG
, "denali->dev_info:\n");
711 nand_dbg_print(NAND_DBG_DEBUG
, "DeviceMaker: 0x%x\n",
712 denali
->dev_info
.wDeviceMaker
);
713 nand_dbg_print(NAND_DBG_DEBUG
, "DeviceID: 0x%x\n",
714 denali
->dev_info
.wDeviceID
);
715 nand_dbg_print(NAND_DBG_DEBUG
, "DeviceType: 0x%x\n",
716 denali
->dev_info
.wDeviceType
);
717 nand_dbg_print(NAND_DBG_DEBUG
, "SpectraStartBlock: %d\n",
718 denali
->dev_info
.wSpectraStartBlock
);
719 nand_dbg_print(NAND_DBG_DEBUG
, "SpectraEndBlock: %d\n",
720 denali
->dev_info
.wSpectraEndBlock
);
721 nand_dbg_print(NAND_DBG_DEBUG
, "TotalBlocks: %d\n",
722 denali
->dev_info
.wTotalBlocks
);
723 nand_dbg_print(NAND_DBG_DEBUG
, "PagesPerBlock: %d\n",
724 denali
->dev_info
.wPagesPerBlock
);
725 nand_dbg_print(NAND_DBG_DEBUG
, "PageSize: %d\n",
726 denali
->dev_info
.wPageSize
);
727 nand_dbg_print(NAND_DBG_DEBUG
, "PageDataSize: %d\n",
728 denali
->dev_info
.wPageDataSize
);
729 nand_dbg_print(NAND_DBG_DEBUG
, "PageSpareSize: %d\n",
730 denali
->dev_info
.wPageSpareSize
);
731 nand_dbg_print(NAND_DBG_DEBUG
, "NumPageSpareFlag: %d\n",
732 denali
->dev_info
.wNumPageSpareFlag
);
733 nand_dbg_print(NAND_DBG_DEBUG
, "ECCBytesPerSector: %d\n",
734 denali
->dev_info
.wECCBytesPerSector
);
735 nand_dbg_print(NAND_DBG_DEBUG
, "BlockSize: %d\n",
736 denali
->dev_info
.wBlockSize
);
737 nand_dbg_print(NAND_DBG_DEBUG
, "BlockDataSize: %d\n",
738 denali
->dev_info
.wBlockDataSize
);
739 nand_dbg_print(NAND_DBG_DEBUG
, "DataBlockNum: %d\n",
740 denali
->dev_info
.wDataBlockNum
);
741 nand_dbg_print(NAND_DBG_DEBUG
, "PlaneNum: %d\n",
742 denali
->dev_info
.bPlaneNum
);
743 nand_dbg_print(NAND_DBG_DEBUG
, "DeviceMainAreaSize: %d\n",
744 denali
->dev_info
.wDeviceMainAreaSize
);
745 nand_dbg_print(NAND_DBG_DEBUG
, "DeviceSpareAreaSize: %d\n",
746 denali
->dev_info
.wDeviceSpareAreaSize
);
747 nand_dbg_print(NAND_DBG_DEBUG
, "DevicesConnected: %d\n",
748 denali
->dev_info
.wDevicesConnected
);
749 nand_dbg_print(NAND_DBG_DEBUG
, "DeviceWidth: %d\n",
750 denali
->dev_info
.wDeviceWidth
);
751 nand_dbg_print(NAND_DBG_DEBUG
, "HWRevision: 0x%x\n",
752 denali
->dev_info
.wHWRevision
);
753 nand_dbg_print(NAND_DBG_DEBUG
, "HWFeatures: 0x%x\n",
754 denali
->dev_info
.wHWFeatures
);
755 nand_dbg_print(NAND_DBG_DEBUG
, "ONFIDevFeatures: 0x%x\n",
756 denali
->dev_info
.wONFIDevFeatures
);
757 nand_dbg_print(NAND_DBG_DEBUG
, "ONFIOptCommands: 0x%x\n",
758 denali
->dev_info
.wONFIOptCommands
);
759 nand_dbg_print(NAND_DBG_DEBUG
, "ONFITimingMode: 0x%x\n",
760 denali
->dev_info
.wONFITimingMode
);
761 nand_dbg_print(NAND_DBG_DEBUG
, "ONFIPgmCacheTimingMode: 0x%x\n",
762 denali
->dev_info
.wONFIPgmCacheTimingMode
);
763 nand_dbg_print(NAND_DBG_DEBUG
, "MLCDevice: %s\n",
764 denali
->dev_info
.MLCDevice
? "Yes" : "No");
765 nand_dbg_print(NAND_DBG_DEBUG
, "SpareSkipBytes: %d\n",
766 denali
->dev_info
.wSpareSkipBytes
);
767 nand_dbg_print(NAND_DBG_DEBUG
, "BitsInPageNumber: %d\n",
768 denali
->dev_info
.nBitsInPageNumber
);
769 nand_dbg_print(NAND_DBG_DEBUG
, "BitsInPageDataSize: %d\n",
770 denali
->dev_info
.nBitsInPageDataSize
);
771 nand_dbg_print(NAND_DBG_DEBUG
, "BitsInBlockDataSize: %d\n",
772 denali
->dev_info
.nBitsInBlockDataSize
);
775 static uint16_t NAND_Read_Device_ID(struct denali_nand_info
*denali
)
777 uint16_t status
= PASS
;
778 uint8_t no_of_planes
;
780 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
781 __FILE__
, __LINE__
, __func__
);
783 denali
->dev_info
.wDeviceMaker
= ioread32(denali
->flash_reg
+ MANUFACTURER_ID
);
784 denali
->dev_info
.wDeviceID
= ioread32(denali
->flash_reg
+ DEVICE_ID
);
785 denali
->dev_info
.bDeviceParam0
= ioread32(denali
->flash_reg
+ DEVICE_PARAM_0
);
786 denali
->dev_info
.bDeviceParam1
= ioread32(denali
->flash_reg
+ DEVICE_PARAM_1
);
787 denali
->dev_info
.bDeviceParam2
= ioread32(denali
->flash_reg
+ DEVICE_PARAM_2
);
789 denali
->dev_info
.MLCDevice
= ioread32(denali
->flash_reg
+ DEVICE_PARAM_0
) & 0x0c;
791 if (ioread32(denali
->flash_reg
+ ONFI_DEVICE_NO_OF_LUNS
) &
792 ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE
) { /* ONFI 1.0 NAND */
793 if (FAIL
== get_onfi_nand_para(denali
))
795 } else if (denali
->dev_info
.wDeviceMaker
== 0xEC) { /* Samsung NAND */
796 get_samsung_nand_para(denali
);
797 } else if (denali
->dev_info
.wDeviceMaker
== 0x98) { /* Toshiba NAND */
798 get_toshiba_nand_para(denali
);
799 } else if (denali
->dev_info
.wDeviceMaker
== 0xAD) { /* Hynix NAND */
800 get_hynix_nand_para(denali
);
802 denali
->dev_info
.wTotalBlocks
= GLOB_HWCTL_DEFAULT_BLKS
;
805 nand_dbg_print(NAND_DBG_DEBUG
, "Dump timing register values:"
806 "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
807 "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
808 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
809 ioread32(denali
->flash_reg
+ ACC_CLKS
),
810 ioread32(denali
->flash_reg
+ RE_2_WE
),
811 ioread32(denali
->flash_reg
+ WE_2_RE
),
812 ioread32(denali
->flash_reg
+ ADDR_2_DATA
),
813 ioread32(denali
->flash_reg
+ RDWR_EN_LO_CNT
),
814 ioread32(denali
->flash_reg
+ RDWR_EN_HI_CNT
),
815 ioread32(denali
->flash_reg
+ CS_SETUP_CNT
));
817 denali
->dev_info
.wHWRevision
= ioread32(denali
->flash_reg
+ REVISION
);
818 denali
->dev_info
.wHWFeatures
= ioread32(denali
->flash_reg
+ FEATURES
);
820 denali
->dev_info
.wDeviceMainAreaSize
=
821 ioread32(denali
->flash_reg
+ DEVICE_MAIN_AREA_SIZE
);
822 denali
->dev_info
.wDeviceSpareAreaSize
=
823 ioread32(denali
->flash_reg
+ DEVICE_SPARE_AREA_SIZE
);
825 denali
->dev_info
.wPageDataSize
=
826 ioread32(denali
->flash_reg
+ LOGICAL_PAGE_DATA_SIZE
);
828 /* Note: When using the Micon 4K NAND device, the controller will report
829 * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
830 * And if force set it to 218 bytes, the controller can not work
831 * correctly. So just let it be. But keep in mind that this bug may
833 * other problems in future. - Yunpeng 2008-10-10
835 denali
->dev_info
.wPageSpareSize
=
836 ioread32(denali
->flash_reg
+ LOGICAL_PAGE_SPARE_SIZE
);
838 denali
->dev_info
.wPagesPerBlock
= ioread32(denali
->flash_reg
+ PAGES_PER_BLOCK
);
840 denali
->dev_info
.wPageSize
=
841 denali
->dev_info
.wPageDataSize
+ denali
->dev_info
.wPageSpareSize
;
842 denali
->dev_info
.wBlockSize
=
843 denali
->dev_info
.wPageSize
* denali
->dev_info
.wPagesPerBlock
;
844 denali
->dev_info
.wBlockDataSize
=
845 denali
->dev_info
.wPagesPerBlock
* denali
->dev_info
.wPageDataSize
;
847 denali
->dev_info
.wDeviceWidth
= ioread32(denali
->flash_reg
+ DEVICE_WIDTH
);
848 denali
->dev_info
.wDeviceType
=
849 ((ioread32(denali
->flash_reg
+ DEVICE_WIDTH
) > 0) ? 16 : 8);
851 denali
->dev_info
.wDevicesConnected
= ioread32(denali
->flash_reg
+ DEVICES_CONNECTED
);
853 denali
->dev_info
.wSpareSkipBytes
=
854 ioread32(denali
->flash_reg
+ SPARE_AREA_SKIP_BYTES
) *
855 denali
->dev_info
.wDevicesConnected
;
857 denali
->dev_info
.nBitsInPageNumber
=
858 ilog2(denali
->dev_info
.wPagesPerBlock
);
859 denali
->dev_info
.nBitsInPageDataSize
=
860 ilog2(denali
->dev_info
.wPageDataSize
);
861 denali
->dev_info
.nBitsInBlockDataSize
=
862 ilog2(denali
->dev_info
.wBlockDataSize
);
864 set_ecc_config(denali
);
866 no_of_planes
= ioread32(denali
->flash_reg
+ NUMBER_OF_PLANES
) &
867 NUMBER_OF_PLANES__VALUE
;
869 switch (no_of_planes
) {
874 denali
->dev_info
.bPlaneNum
= no_of_planes
+ 1;
881 find_valid_banks(denali
);
883 detect_partition_feature(denali
);
885 dump_device_info(denali
);
887 /* If the user specified to override the default timings
888 * with a specific ONFI mode, we apply those changes here.
890 if (onfi_timing_mode
!= NAND_DEFAULT_TIMINGS
)
892 NAND_ONFi_Timing_Mode(denali
, onfi_timing_mode
);
898 static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info
*denali
,
901 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
902 __FILE__
, __LINE__
, __func__
);
905 denali_write32(1, denali
->flash_reg
+ GLOBAL_INT_ENABLE
);
907 denali_write32(0, denali
->flash_reg
+ GLOBAL_INT_ENABLE
);
/* validation function to verify that the controlling software is making
 * a valid request (banks are numbered 0..3) */
static inline bool is_flash_bank_valid(int flash_bank)
{
	return (flash_bank >= 0 && flash_bank < 4);
}
918 static void denali_irq_init(struct denali_nand_info
*denali
)
920 uint32_t int_mask
= 0;
922 /* Disable global interrupts */
923 NAND_LLD_Enable_Disable_Interrupts(denali
, false);
925 int_mask
= DENALI_IRQ_ALL
;
927 /* Clear all status bits */
928 denali_write32(0xFFFF, denali
->flash_reg
+ INTR_STATUS0
);
929 denali_write32(0xFFFF, denali
->flash_reg
+ INTR_STATUS1
);
930 denali_write32(0xFFFF, denali
->flash_reg
+ INTR_STATUS2
);
931 denali_write32(0xFFFF, denali
->flash_reg
+ INTR_STATUS3
);
933 denali_irq_enable(denali
, int_mask
);
936 static void denali_irq_cleanup(int irqnum
, struct denali_nand_info
*denali
)
938 NAND_LLD_Enable_Disable_Interrupts(denali
, false);
939 free_irq(irqnum
, denali
);
942 static void denali_irq_enable(struct denali_nand_info
*denali
, uint32_t int_mask
)
944 denali_write32(int_mask
, denali
->flash_reg
+ INTR_EN0
);
945 denali_write32(int_mask
, denali
->flash_reg
+ INTR_EN1
);
946 denali_write32(int_mask
, denali
->flash_reg
+ INTR_EN2
);
947 denali_write32(int_mask
, denali
->flash_reg
+ INTR_EN3
);
950 /* This function only returns when an interrupt that this driver cares about
951 * occurs. This is to reduce the overhead of servicing interrupts
953 static inline uint32_t denali_irq_detected(struct denali_nand_info
*denali
)
955 return (read_interrupt_status(denali
) & DENALI_IRQ_ALL
);
958 /* Interrupts are cleared by writing a 1 to the appropriate status bit */
959 static inline void clear_interrupt(struct denali_nand_info
*denali
, uint32_t irq_mask
)
961 uint32_t intr_status_reg
= 0;
963 intr_status_reg
= intr_status_addresses
[denali
->flash_bank
];
965 denali_write32(irq_mask
, denali
->flash_reg
+ intr_status_reg
);
968 static void clear_interrupts(struct denali_nand_info
*denali
)
970 uint32_t status
= 0x0;
971 spin_lock_irq(&denali
->irq_lock
);
973 status
= read_interrupt_status(denali
);
976 denali
->irq_debug_array
[denali
->idx
++] = 0x30000000 | status
;
980 denali
->irq_status
= 0x0;
981 spin_unlock_irq(&denali
->irq_lock
);
984 static uint32_t read_interrupt_status(struct denali_nand_info
*denali
)
986 uint32_t intr_status_reg
= 0;
988 intr_status_reg
= intr_status_addresses
[denali
->flash_bank
];
990 return ioread32(denali
->flash_reg
+ intr_status_reg
);
994 static void print_irq_log(struct denali_nand_info
*denali
)
998 printk("ISR debug log index = %X\n", denali
->idx
);
999 for (i
= 0; i
< 32; i
++)
1001 printk("%08X: %08X\n", i
, denali
->irq_debug_array
[i
]);
1006 /* This is the interrupt service routine. It handles all interrupts
1007 * sent to this device. Note that on CE4100, this is a shared
1010 static irqreturn_t
denali_isr(int irq
, void *dev_id
)
1012 struct denali_nand_info
*denali
= dev_id
;
1013 uint32_t irq_status
= 0x0;
1014 irqreturn_t result
= IRQ_NONE
;
1016 spin_lock(&denali
->irq_lock
);
1018 /* check to see if a valid NAND chip has
1021 if (is_flash_bank_valid(denali
->flash_bank
))
1023 /* check to see if controller generated
1024 * the interrupt, since this is a shared interrupt */
1025 if ((irq_status
= denali_irq_detected(denali
)) != 0)
1028 denali
->irq_debug_array
[denali
->idx
++] = 0x10000000 | irq_status
;
1031 printk("IRQ status = 0x%04x\n", irq_status
);
1033 /* handle interrupt */
1034 /* first acknowledge it */
1035 clear_interrupt(denali
, irq_status
);
1036 /* store the status in the device context for someone
1038 denali
->irq_status
|= irq_status
;
1039 /* notify anyone who cares that it happened */
1040 complete(&denali
->complete
);
1041 /* tell the OS that we've handled this */
1042 result
= IRQ_HANDLED
;
1045 spin_unlock(&denali
->irq_lock
);
1048 #define BANK(x) ((x) << 24)
1050 static uint32_t wait_for_irq(struct denali_nand_info
*denali
, uint32_t irq_mask
)
1052 unsigned long comp_res
= 0;
1053 uint32_t intr_status
= 0;
1055 unsigned long timeout
= msecs_to_jiffies(1000);
1060 printk("waiting for 0x%x\n", irq_mask
);
1062 comp_res
= wait_for_completion_timeout(&denali
->complete
, timeout
);
1063 spin_lock_irq(&denali
->irq_lock
);
1064 intr_status
= denali
->irq_status
;
1067 denali
->irq_debug_array
[denali
->idx
++] = 0x20000000 | (irq_mask
<< 16) | intr_status
;
1071 if (intr_status
& irq_mask
)
1073 denali
->irq_status
&= ~irq_mask
;
1074 spin_unlock_irq(&denali
->irq_lock
);
1076 if (retry
) printk("status on retry = 0x%x\n", intr_status
);
1078 /* our interrupt was detected */
1083 /* these are not the interrupts you are looking for -
1084 * need to wait again */
1085 spin_unlock_irq(&denali
->irq_lock
);
1087 print_irq_log(denali
);
1088 printk("received irq nobody cared: irq_status = 0x%x,"
1089 " irq_mask = 0x%x, timeout = %ld\n", intr_status
, irq_mask
, comp_res
);
1093 } while (comp_res
!= 0);
1098 printk(KERN_ERR
"timeout occurred, status = 0x%x, mask = 0x%x\n",
1099 intr_status
, irq_mask
);
1106 /* This helper function setups the registers for ECC and whether or not
1107 the spare area will be transfered. */
1108 static void setup_ecc_for_xfer(struct denali_nand_info
*denali
, bool ecc_en
,
1109 bool transfer_spare
)
1111 int ecc_en_flag
= 0, transfer_spare_flag
= 0;
1113 /* set ECC, transfer spare bits if needed */
1114 ecc_en_flag
= ecc_en
? ECC_ENABLE__FLAG
: 0;
1115 transfer_spare_flag
= transfer_spare
? TRANSFER_SPARE_REG__FLAG
: 0;
1117 /* Enable spare area/ECC per user's request. */
1118 denali_write32(ecc_en_flag
, denali
->flash_reg
+ ECC_ENABLE
);
1119 denali_write32(transfer_spare_flag
, denali
->flash_reg
+ TRANSFER_SPARE_REG
);
1122 /* sends a pipeline command operation to the controller. See the Denali NAND
1123 controller's user guide for more information (section 4.2.3.6).
1125 static int denali_send_pipeline_cmd(struct denali_nand_info
*denali
, bool ecc_en
,
1126 bool transfer_spare
, int access_type
,
1130 uint32_t addr
= 0x0, cmd
= 0x0, page_count
= 1, irq_status
= 0,
1133 if (op
== DENALI_READ
) irq_mask
= INTR_STATUS0__LOAD_COMP
;
1134 else if (op
== DENALI_WRITE
) irq_mask
= 0;
1137 setup_ecc_for_xfer(denali
, ecc_en
, transfer_spare
);
1140 spin_lock_irq(&denali
->irq_lock
);
1141 denali
->irq_debug_array
[denali
->idx
++] = 0x40000000 | ioread32(denali
->flash_reg
+ ECC_ENABLE
) | (access_type
<< 4);
1143 spin_unlock_irq(&denali
->irq_lock
);
1147 /* clear interrupts */
1148 clear_interrupts(denali
);
1150 addr
= BANK(denali
->flash_bank
) | denali
->page
;
1152 if (op
== DENALI_WRITE
&& access_type
!= SPARE_ACCESS
)
1154 cmd
= MODE_01
| addr
;
1155 denali_write32(cmd
, denali
->flash_mem
);
1157 else if (op
== DENALI_WRITE
&& access_type
== SPARE_ACCESS
)
1159 /* read spare area */
1160 cmd
= MODE_10
| addr
;
1161 index_addr(denali
, (uint32_t)cmd
, access_type
);
1163 cmd
= MODE_01
| addr
;
1164 denali_write32(cmd
, denali
->flash_mem
);
1166 else if (op
== DENALI_READ
)
1168 /* setup page read request for access type */
1169 cmd
= MODE_10
| addr
;
1170 index_addr(denali
, (uint32_t)cmd
, access_type
);
1172 /* page 33 of the NAND controller spec indicates we should not
1173 use the pipeline commands in Spare area only mode. So we
1176 if (access_type
== SPARE_ACCESS
)
1178 cmd
= MODE_01
| addr
;
1179 denali_write32(cmd
, denali
->flash_mem
);
1183 index_addr(denali
, (uint32_t)cmd
, 0x2000 | op
| page_count
);
1185 /* wait for command to be accepted
1186 * can always use status0 bit as the mask is identical for each
1188 irq_status
= wait_for_irq(denali
, irq_mask
);
1190 if (irq_status
== 0)
1192 printk(KERN_ERR
"cmd, page, addr on timeout "
1193 "(0x%x, 0x%x, 0x%x)\n", cmd
, denali
->page
, addr
);
1198 cmd
= MODE_01
| addr
;
1199 denali_write32(cmd
, denali
->flash_mem
);
1206 /* helper function that simply writes a buffer to the flash */
1207 static int write_data_to_flash_mem(struct denali_nand_info
*denali
, const uint8_t *buf
,
1210 uint32_t i
= 0, *buf32
;
1212 /* verify that the len is a multiple of 4. see comment in
1213 * read_data_from_flash_mem() */
1214 BUG_ON((len
% 4) != 0);
1216 /* write the data to the flash memory */
1217 buf32
= (uint32_t *)buf
;
1218 for (i
= 0; i
< len
/ 4; i
++)
1220 denali_write32(*buf32
++, denali
->flash_mem
+ 0x10);
1222 return i
*4; /* intent is to return the number of bytes read */
1225 /* helper function that simply reads a buffer from the flash */
1226 static int read_data_from_flash_mem(struct denali_nand_info
*denali
, uint8_t *buf
,
1229 uint32_t i
= 0, *buf32
;
1231 /* we assume that len will be a multiple of 4, if not
1232 * it would be nice to know about it ASAP rather than
1233 * have random failures...
1234 * This assumption is based on the fact that this
1235 * function is designed to be used to read flash pages,
1236 * which are typically multiples of 4...
1239 BUG_ON((len
% 4) != 0);
1241 /* transfer the data from the flash */
1242 buf32
= (uint32_t *)buf
;
1243 for (i
= 0; i
< len
/ 4; i
++)
1245 *buf32
++ = ioread32(denali
->flash_mem
+ 0x10);
1247 return i
*4; /* intent is to return the number of bytes read */
1250 /* writes OOB data to the device */
1251 static int write_oob_data(struct mtd_info
*mtd
, uint8_t *buf
, int page
)
1253 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1254 uint32_t irq_status
= 0;
1255 uint32_t irq_mask
= INTR_STATUS0__PROGRAM_COMP
|
1256 INTR_STATUS0__PROGRAM_FAIL
;
1259 denali
->page
= page
;
1261 if (denali_send_pipeline_cmd(denali
, false, false, SPARE_ACCESS
,
1262 DENALI_WRITE
) == PASS
)
1264 write_data_to_flash_mem(denali
, buf
, mtd
->oobsize
);
1267 spin_lock_irq(&denali
->irq_lock
);
1268 denali
->irq_debug_array
[denali
->idx
++] = 0x80000000 | mtd
->oobsize
;
1270 spin_unlock_irq(&denali
->irq_lock
);
1274 /* wait for operation to complete */
1275 irq_status
= wait_for_irq(denali
, irq_mask
);
1277 if (irq_status
== 0)
1279 printk(KERN_ERR
"OOB write failed\n");
1285 printk(KERN_ERR
"unable to send pipeline command\n");
1291 /* reads OOB data from the device */
1292 static void read_oob_data(struct mtd_info
*mtd
, uint8_t *buf
, int page
)
1294 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1295 uint32_t irq_mask
= INTR_STATUS0__LOAD_COMP
, irq_status
= 0, addr
= 0x0, cmd
= 0x0;
1297 denali
->page
= page
;
1300 printk("read_oob %d\n", page
);
1302 if (denali_send_pipeline_cmd(denali
, false, true, SPARE_ACCESS
,
1303 DENALI_READ
) == PASS
)
1305 read_data_from_flash_mem(denali
, buf
, mtd
->oobsize
);
1307 /* wait for command to be accepted
1308 * can always use status0 bit as the mask is identical for each
1310 irq_status
= wait_for_irq(denali
, irq_mask
);
1312 if (irq_status
== 0)
1314 printk(KERN_ERR
"page on OOB timeout %d\n", denali
->page
);
1317 /* We set the device back to MAIN_ACCESS here as I observed
1318 * instability with the controller if you do a block erase
1319 * and the last transaction was a SPARE_ACCESS. Block erase
1320 * is reliable (according to the MTD test infrastructure)
1321 * if you are in MAIN_ACCESS.
1323 addr
= BANK(denali
->flash_bank
) | denali
->page
;
1324 cmd
= MODE_10
| addr
;
1325 index_addr(denali
, (uint32_t)cmd
, MAIN_ACCESS
);
1328 spin_lock_irq(&denali
->irq_lock
);
1329 denali
->irq_debug_array
[denali
->idx
++] = 0x60000000 | mtd
->oobsize
;
1331 spin_unlock_irq(&denali
->irq_lock
);
1336 /* this function examines buffers to see if they contain data that
1337 * indicate that the buffer is part of an erased region of flash.
1339 bool is_erased(uint8_t *buf
, int len
)
1342 for (i
= 0; i
< len
; i
++)
#define ECC_SECTOR_SIZE 512
#define ECC_SECTOR(x)	(((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x)	(((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
/* NOTE(review): masking with the bare ERR_CORRECTION_INFO symbol (a
 * register offset elsewhere in this driver) looks wrong — an error-type
 * bit mask was probably intended. Confirm against the register header
 * before changing behavior. */
#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO))
/* BUGFIX: '>>' binds tighter than '&', so the original
 * ((x) & ERR_CORRECTION_INFO__DEVICE_NR >> 8) masked with the shifted
 * mask instead of shifting the masked field. Parenthesized so the
 * device number is actually extracted. */
#define ECC_ERR_DEVICE(x)	(((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x)		((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
1360 static bool handle_ecc(struct denali_nand_info
*denali
, uint8_t *buf
,
1361 uint8_t *oobbuf
, uint32_t irq_status
)
1363 bool check_erased_page
= false;
1365 if (irq_status
& INTR_STATUS0__ECC_ERR
)
1367 /* read the ECC errors. we'll ignore them for now */
1368 uint32_t err_address
= 0, err_correction_info
= 0;
1369 uint32_t err_byte
= 0, err_sector
= 0, err_device
= 0;
1370 uint32_t err_correction_value
= 0;
1374 err_address
= ioread32(denali
->flash_reg
+
1376 err_sector
= ECC_SECTOR(err_address
);
1377 err_byte
= ECC_BYTE(err_address
);
1380 err_correction_info
= ioread32(denali
->flash_reg
+
1381 ERR_CORRECTION_INFO
);
1382 err_correction_value
=
1383 ECC_CORRECTION_VALUE(err_correction_info
);
1384 err_device
= ECC_ERR_DEVICE(err_correction_info
);
1386 if (ECC_ERROR_CORRECTABLE(err_correction_info
))
1388 /* offset in our buffer is computed as:
1389 sector number * sector size + offset in
1392 int offset
= err_sector
* ECC_SECTOR_SIZE
+
1394 if (offset
< denali
->mtd
.writesize
)
1396 /* correct the ECC error */
1397 buf
[offset
] ^= err_correction_value
;
1398 denali
->mtd
.ecc_stats
.corrected
++;
1402 /* bummer, couldn't correct the error */
1403 printk(KERN_ERR
"ECC offset invalid\n");
1404 denali
->mtd
.ecc_stats
.failed
++;
1409 /* if the error is not correctable, need to
1410 * look at the page to see if it is an erased page.
1411 * if so, then it's not a real ECC error */
1412 check_erased_page
= true;
1416 printk("Detected ECC error in page %d: err_addr = 0x%08x,"
1417 " info to fix is 0x%08x\n", denali
->page
, err_address
,
1418 err_correction_info
);
1420 } while (!ECC_LAST_ERR(err_correction_info
));
1422 return check_erased_page
;
1425 /* programs the controller to either enable/disable DMA transfers */
1426 static void denali_enable_dma(struct denali_nand_info
*denali
, bool en
)
1428 uint32_t reg_val
= 0x0;
1430 if (en
) reg_val
= DMA_ENABLE__FLAG
;
1432 denali_write32(reg_val
, denali
->flash_reg
+ DMA_ENABLE
);
1433 ioread32(denali
->flash_reg
+ DMA_ENABLE
);
1436 /* setups the HW to perform the data DMA */
1437 static void denali_setup_dma(struct denali_nand_info
*denali
, int op
)
1439 uint32_t mode
= 0x0;
1440 const int page_count
= 1;
1441 dma_addr_t addr
= denali
->buf
.dma_buf
;
1443 mode
= MODE_10
| BANK(denali
->flash_bank
);
1445 /* DMA is a four step process */
1447 /* 1. setup transfer type and # of pages */
1448 index_addr(denali
, mode
| denali
->page
, 0x2000 | op
| page_count
);
1450 /* 2. set memory high address bits 23:8 */
1451 index_addr(denali
, mode
| ((uint16_t)(addr
>> 16) << 8), 0x2200);
1453 /* 3. set memory low address bits 23:8 */
1454 index_addr(denali
, mode
| ((uint16_t)addr
<< 8), 0x2300);
1456 /* 4. interrupt when complete, burst len = 64 bytes*/
1457 index_addr(denali
, mode
| 0x14000, 0x2400);
1460 /* writes a page. user specifies type, and this function handles the
1461 configuration details. */
1462 static void write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1463 const uint8_t *buf
, bool raw_xfer
)
1465 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1466 struct pci_dev
*pci_dev
= denali
->dev
;
1468 dma_addr_t addr
= denali
->buf
.dma_buf
;
1469 size_t size
= denali
->mtd
.writesize
+ denali
->mtd
.oobsize
;
1471 uint32_t irq_status
= 0;
1472 uint32_t irq_mask
= INTR_STATUS0__DMA_CMD_COMP
|
1473 INTR_STATUS0__PROGRAM_FAIL
;
1475 /* if it is a raw xfer, we want to disable ecc, and send
1477 * !raw_xfer - enable ecc
1478 * raw_xfer - transfer spare
1480 setup_ecc_for_xfer(denali
, !raw_xfer
, raw_xfer
);
1482 /* copy buffer into DMA buffer */
1483 memcpy(denali
->buf
.buf
, buf
, mtd
->writesize
);
1487 /* transfer the data to the spare area */
1488 memcpy(denali
->buf
.buf
+ mtd
->writesize
,
1493 pci_dma_sync_single_for_device(pci_dev
, addr
, size
, PCI_DMA_TODEVICE
);
1495 clear_interrupts(denali
);
1496 denali_enable_dma(denali
, true);
1498 denali_setup_dma(denali
, DENALI_WRITE
);
1500 /* wait for operation to complete */
1501 irq_status
= wait_for_irq(denali
, irq_mask
);
1503 if (irq_status
== 0)
1505 printk(KERN_ERR
"timeout on write_page (type = %d)\n", raw_xfer
);
1507 (irq_status
& INTR_STATUS0__PROGRAM_FAIL
) ? NAND_STATUS_FAIL
:
1511 denali_enable_dma(denali
, false);
1512 pci_dma_sync_single_for_cpu(pci_dev
, addr
, size
, PCI_DMA_TODEVICE
);
1515 /* NAND core entry points */
1517 /* this is the callback that the NAND core calls to write a page. Since
1518 writing a page with ECC or without is similar, all the work is done
1519 by write_page above. */
1520 static void denali_write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1523 /* for regular page writes, we let HW handle all the ECC
1524 * data written to the device. */
1525 write_page(mtd
, chip
, buf
, false);
1528 /* This is the callback that the NAND core calls to write a page without ECC.
1529 raw access is similiar to ECC page writes, so all the work is done in the
1530 write_page() function above.
1532 static void denali_write_page_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1535 /* for raw page writes, we want to disable ECC and simply write
1536 whatever data is in the buffer. */
1537 write_page(mtd
, chip
, buf
, true);
1540 static int denali_write_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1543 return write_oob_data(mtd
, chip
->oob_poi
, page
);
1546 static int denali_read_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1547 int page
, int sndcmd
)
1549 read_oob_data(mtd
, chip
->oob_poi
, page
);
1551 return 0; /* notify NAND core to send command to
1555 static int denali_read_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1556 uint8_t *buf
, int page
)
1558 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1559 struct pci_dev
*pci_dev
= denali
->dev
;
1561 dma_addr_t addr
= denali
->buf
.dma_buf
;
1562 size_t size
= denali
->mtd
.writesize
+ denali
->mtd
.oobsize
;
1564 uint32_t irq_status
= 0;
1565 uint32_t irq_mask
= INTR_STATUS0__ECC_TRANSACTION_DONE
|
1566 INTR_STATUS0__ECC_ERR
;
1567 bool check_erased_page
= false;
1569 setup_ecc_for_xfer(denali
, true, false);
1571 denali_enable_dma(denali
, true);
1572 pci_dma_sync_single_for_device(pci_dev
, addr
, size
, PCI_DMA_FROMDEVICE
);
1574 clear_interrupts(denali
);
1575 denali_setup_dma(denali
, DENALI_READ
);
1577 /* wait for operation to complete */
1578 irq_status
= wait_for_irq(denali
, irq_mask
);
1580 pci_dma_sync_single_for_cpu(pci_dev
, addr
, size
, PCI_DMA_FROMDEVICE
);
1582 memcpy(buf
, denali
->buf
.buf
, mtd
->writesize
);
1584 check_erased_page
= handle_ecc(denali
, buf
, chip
->oob_poi
, irq_status
);
1585 denali_enable_dma(denali
, false);
1587 if (check_erased_page
)
1589 read_oob_data(&denali
->mtd
, chip
->oob_poi
, denali
->page
);
1591 /* check ECC failures that may have occurred on erased pages */
1592 if (check_erased_page
)
1594 if (!is_erased(buf
, denali
->mtd
.writesize
))
1596 denali
->mtd
.ecc_stats
.failed
++;
1598 if (!is_erased(buf
, denali
->mtd
.oobsize
))
1600 denali
->mtd
.ecc_stats
.failed
++;
1607 static int denali_read_page_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1608 uint8_t *buf
, int page
)
1610 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1611 struct pci_dev
*pci_dev
= denali
->dev
;
1613 dma_addr_t addr
= denali
->buf
.dma_buf
;
1614 size_t size
= denali
->mtd
.writesize
+ denali
->mtd
.oobsize
;
1616 uint32_t irq_status
= 0;
1617 uint32_t irq_mask
= INTR_STATUS0__DMA_CMD_COMP
;
1619 setup_ecc_for_xfer(denali
, false, true);
1620 denali_enable_dma(denali
, true);
1622 pci_dma_sync_single_for_device(pci_dev
, addr
, size
, PCI_DMA_FROMDEVICE
);
1624 clear_interrupts(denali
);
1625 denali_setup_dma(denali
, DENALI_READ
);
1627 /* wait for operation to complete */
1628 irq_status
= wait_for_irq(denali
, irq_mask
);
1630 pci_dma_sync_single_for_cpu(pci_dev
, addr
, size
, PCI_DMA_FROMDEVICE
);
1632 denali_enable_dma(denali
, false);
1634 memcpy(buf
, denali
->buf
.buf
, mtd
->writesize
);
1635 memcpy(chip
->oob_poi
, denali
->buf
.buf
+ mtd
->writesize
, mtd
->oobsize
);
1640 static uint8_t denali_read_byte(struct mtd_info
*mtd
)
1642 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1643 uint8_t result
= 0xff;
1645 if (denali
->buf
.head
< denali
->buf
.tail
)
1647 result
= denali
->buf
.buf
[denali
->buf
.head
++];
1651 printk("read byte -> 0x%02x\n", result
);
1656 static void denali_select_chip(struct mtd_info
*mtd
, int chip
)
1658 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1660 printk("denali select chip %d\n", chip
);
1662 spin_lock_irq(&denali
->irq_lock
);
1663 denali
->flash_bank
= chip
;
1664 spin_unlock_irq(&denali
->irq_lock
);
1667 static int denali_waitfunc(struct mtd_info
*mtd
, struct nand_chip
*chip
)
1669 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1670 int status
= denali
->status
;
1674 printk("waitfunc %d\n", status
);
1679 static void denali_erase(struct mtd_info
*mtd
, int page
)
1681 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1683 uint32_t cmd
= 0x0, irq_status
= 0;
1686 printk("erase page: %d\n", page
);
1688 /* clear interrupts */
1689 clear_interrupts(denali
);
1691 /* setup page read request for access type */
1692 cmd
= MODE_10
| BANK(denali
->flash_bank
) | page
;
1693 index_addr(denali
, (uint32_t)cmd
, 0x1);
1695 /* wait for erase to complete or failure to occur */
1696 irq_status
= wait_for_irq(denali
, INTR_STATUS0__ERASE_COMP
|
1697 INTR_STATUS0__ERASE_FAIL
);
1699 denali
->status
= (irq_status
& INTR_STATUS0__ERASE_FAIL
) ? NAND_STATUS_FAIL
:
1703 static void denali_cmdfunc(struct mtd_info
*mtd
, unsigned int cmd
, int col
,
1706 struct denali_nand_info
*denali
= mtd_to_denali(mtd
);
1709 printk("cmdfunc: 0x%x %d %d\n", cmd
, col
, page
);
1713 case NAND_CMD_PAGEPROG
:
1715 case NAND_CMD_STATUS
:
1716 read_status(denali
);
1718 case NAND_CMD_READID
:
1720 if (denali
->flash_bank
< denali
->total_used_banks
)
1722 /* write manufacturer information into nand
1723 buffer for NAND subsystem to fetch.
1725 write_byte_to_buf(denali
, denali
->dev_info
.wDeviceMaker
);
1726 write_byte_to_buf(denali
, denali
->dev_info
.wDeviceID
);
1727 write_byte_to_buf(denali
, denali
->dev_info
.bDeviceParam0
);
1728 write_byte_to_buf(denali
, denali
->dev_info
.bDeviceParam1
);
1729 write_byte_to_buf(denali
, denali
->dev_info
.bDeviceParam2
);
1734 for (i
= 0; i
< 5; i
++)
1735 write_byte_to_buf(denali
, 0xff);
1738 case NAND_CMD_READ0
:
1739 case NAND_CMD_SEQIN
:
1740 denali
->page
= page
;
1742 case NAND_CMD_RESET
:
1745 case NAND_CMD_READOOB
:
1746 /* TODO: Read OOB data */
1749 printk(KERN_ERR
": unsupported command received 0x%x\n", cmd
);
1754 /* stubs for ECC functions not used by the NAND core */
1755 static int denali_ecc_calculate(struct mtd_info
*mtd
, const uint8_t *data
,
1758 printk(KERN_ERR
"denali_ecc_calculate called unexpectedly\n");
1763 static int denali_ecc_correct(struct mtd_info
*mtd
, uint8_t *data
,
1764 uint8_t *read_ecc
, uint8_t *calc_ecc
)
1766 printk(KERN_ERR
"denali_ecc_correct called unexpectedly\n");
1771 static void denali_ecc_hwctl(struct mtd_info
*mtd
, int mode
)
1773 printk(KERN_ERR
"denali_ecc_hwctl called unexpectedly\n");
1776 /* end NAND core entry points */
1778 /* Initialization code to bring the device up to a known good state */
1779 static void denali_hw_init(struct denali_nand_info
*denali
)
1781 denali_irq_init(denali
);
1782 NAND_Flash_Reset(denali
);
1783 denali_write32(0x0F, denali
->flash_reg
+ RB_PIN_ENABLED
);
1784 denali_write32(CHIP_EN_DONT_CARE__FLAG
, denali
->flash_reg
+ CHIP_ENABLE_DONT_CARE
);
1786 denali_write32(0x0, denali
->flash_reg
+ SPARE_AREA_SKIP_BYTES
);
1787 denali_write32(0xffff, denali
->flash_reg
+ SPARE_AREA_MARKER
);
1789 /* Should set value for these registers when init */
1790 denali_write32(0, denali
->flash_reg
+ TWO_ROW_ADDR_CYCLES
);
1791 denali_write32(1, denali
->flash_reg
+ ECC_ENABLE
);
1794 /* ECC layout for SLC devices. Denali spec indicates SLC fixed at 4 bytes */
1795 #define ECC_BYTES_SLC 4 * (2048 / ECC_SECTOR_SIZE)
1796 static struct nand_ecclayout nand_oob_slc
= {
1798 .eccpos
= { 0, 1, 2, 3 }, /* not used */
1800 .offset
= ECC_BYTES_SLC
,
1801 .length
= 64 - ECC_BYTES_SLC
1805 #define ECC_BYTES_MLC 14 * (2048 / ECC_SECTOR_SIZE)
1806 static struct nand_ecclayout nand_oob_mlc_14bit
= {
1808 .eccpos
= { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */
1810 .offset
= ECC_BYTES_MLC
,
1811 .length
= 64 - ECC_BYTES_MLC
1815 static uint8_t bbt_pattern
[] = {'B', 'b', 't', '0' };
1816 static uint8_t mirror_pattern
[] = {'1', 't', 'b', 'B' };
1818 static struct nand_bbt_descr bbt_main_descr
= {
1819 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
1820 | NAND_BBT_2BIT
| NAND_BBT_VERSION
| NAND_BBT_PERCHIP
,
1825 .pattern
= bbt_pattern
,
1828 static struct nand_bbt_descr bbt_mirror_descr
= {
1829 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
1830 | NAND_BBT_2BIT
| NAND_BBT_VERSION
| NAND_BBT_PERCHIP
,
1835 .pattern
= mirror_pattern
,
1838 /* initalize driver data structures */
1839 void denali_drv_init(struct denali_nand_info
*denali
)
1843 /* setup interrupt handler */
1844 /* the completion object will be used to notify
1845 * the callee that the interrupt is done */
1846 init_completion(&denali
->complete
);
1848 /* the spinlock will be used to synchronize the ISR
1849 * with any element that might be access shared
1850 * data (interrupt status) */
1851 spin_lock_init(&denali
->irq_lock
);
1853 /* indicate that MTD has not selected a valid bank yet */
1854 denali
->flash_bank
= CHIP_SELECT_INVALID
;
1856 /* initialize our irq_status variable to indicate no interrupts */
1857 denali
->irq_status
= 0;
1860 /* driver entry point */
1861 static int denali_pci_probe(struct pci_dev
*dev
, const struct pci_device_id
*id
)
1864 resource_size_t csr_base
, mem_base
;
1865 unsigned long csr_len
, mem_len
;
1866 struct denali_nand_info
*denali
;
1868 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
1869 __FILE__
, __LINE__
, __func__
);
1871 denali
= kzalloc(sizeof(*denali
), GFP_KERNEL
);
1875 ret
= pci_enable_device(dev
);
1877 printk(KERN_ERR
"Spectra: pci_enable_device failed.\n");
1881 if (id
->driver_data
== INTEL_CE4100
) {
1882 /* Due to a silicon limitation, we can only support
1883 * ONFI timing mode 1 and below.
1885 if (onfi_timing_mode
< -1 || onfi_timing_mode
> 1)
1887 printk("Intel CE4100 only supports ONFI timing mode 1 "
1892 denali
->platform
= INTEL_CE4100
;
1893 mem_base
= pci_resource_start(dev
, 0);
1894 mem_len
= pci_resource_len(dev
, 1);
1895 csr_base
= pci_resource_start(dev
, 1);
1896 csr_len
= pci_resource_len(dev
, 1);
1898 denali
->platform
= INTEL_MRST
;
1899 csr_base
= pci_resource_start(dev
, 0);
1900 csr_len
= pci_resource_start(dev
, 0);
1901 mem_base
= pci_resource_start(dev
, 1);
1902 mem_len
= pci_resource_len(dev
, 1);
1904 mem_base
= csr_base
+ csr_len
;
1906 nand_dbg_print(NAND_DBG_WARN
,
1907 "Spectra: No second BAR for PCI device; assuming %08Lx\n",
1908 (uint64_t)csr_base
);
1912 /* Is 32-bit DMA supported? */
1913 ret
= pci_set_dma_mask(dev
, DMA_BIT_MASK(32));
1917 printk(KERN_ERR
"Spectra: no usable DMA configuration\n");
1920 denali
->buf
.dma_buf
= pci_map_single(dev
, denali
->buf
.buf
, DENALI_BUF_SIZE
,
1921 PCI_DMA_BIDIRECTIONAL
);
1923 if (pci_dma_mapping_error(dev
, denali
->buf
.dma_buf
))
1925 printk(KERN_ERR
"Spectra: failed to map DMA buffer\n");
1929 pci_set_master(dev
);
1932 ret
= pci_request_regions(dev
, DENALI_NAND_NAME
);
1934 printk(KERN_ERR
"Spectra: Unable to request memory regions\n");
1935 goto failed_req_csr
;
1938 denali
->flash_reg
= ioremap_nocache(csr_base
, csr_len
);
1939 if (!denali
->flash_reg
) {
1940 printk(KERN_ERR
"Spectra: Unable to remap memory region\n");
1942 goto failed_remap_csr
;
1944 nand_dbg_print(NAND_DBG_DEBUG
, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n",
1945 (uint64_t)csr_base
, denali
->flash_reg
, csr_len
);
1947 denali
->flash_mem
= ioremap_nocache(mem_base
, mem_len
);
1948 if (!denali
->flash_mem
) {
1949 printk(KERN_ERR
"Spectra: ioremap_nocache failed!");
1950 iounmap(denali
->flash_reg
);
1952 goto failed_remap_csr
;
1955 nand_dbg_print(NAND_DBG_WARN
,
1956 "Spectra: Remapped flash base address: "
1958 denali
->flash_mem
, csr_len
);
1960 denali_hw_init(denali
);
1961 denali_drv_init(denali
);
1963 nand_dbg_print(NAND_DBG_DEBUG
, "Spectra: IRQ %d\n", dev
->irq
);
1964 if (request_irq(dev
->irq
, denali_isr
, IRQF_SHARED
,
1965 DENALI_NAND_NAME
, denali
)) {
1966 printk(KERN_ERR
"Spectra: Unable to allocate IRQ\n");
1968 goto failed_request_irq
;
1971 /* now that our ISR is registered, we can enable interrupts */
1972 NAND_LLD_Enable_Disable_Interrupts(denali
, true);
1974 pci_set_drvdata(dev
, denali
);
1976 NAND_Read_Device_ID(denali
);
1978 /* MTD supported page sizes vary by kernel. We validate our
1979 * kernel supports the device here.
1981 if (denali
->dev_info
.wPageSize
> NAND_MAX_PAGESIZE
+ NAND_MAX_OOBSIZE
)
1984 printk(KERN_ERR
"Spectra: device size not supported by this "
1989 nand_dbg_print(NAND_DBG_DEBUG
, "Dump timing register values:"
1990 "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
1991 "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
1992 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
1993 ioread32(denali
->flash_reg
+ ACC_CLKS
),
1994 ioread32(denali
->flash_reg
+ RE_2_WE
),
1995 ioread32(denali
->flash_reg
+ WE_2_RE
),
1996 ioread32(denali
->flash_reg
+ ADDR_2_DATA
),
1997 ioread32(denali
->flash_reg
+ RDWR_EN_LO_CNT
),
1998 ioread32(denali
->flash_reg
+ RDWR_EN_HI_CNT
),
1999 ioread32(denali
->flash_reg
+ CS_SETUP_CNT
));
2001 denali
->mtd
.name
= "Denali NAND";
2002 denali
->mtd
.owner
= THIS_MODULE
;
2003 denali
->mtd
.priv
= &denali
->nand
;
2005 /* register the driver with the NAND core subsystem */
2006 denali
->nand
.select_chip
= denali_select_chip
;
2007 denali
->nand
.cmdfunc
= denali_cmdfunc
;
2008 denali
->nand
.read_byte
= denali_read_byte
;
2009 denali
->nand
.waitfunc
= denali_waitfunc
;
2011 /* scan for NAND devices attached to the controller
2012 * this is the first stage in a two step process to register
2013 * with the nand subsystem */
2014 if (nand_scan_ident(&denali
->mtd
, LLD_MAX_FLASH_BANKS
, NULL
))
2020 /* second stage of the NAND scan
2021 * this stage requires information regarding ECC and
2022 * bad block management. */
2024 /* Bad block management */
2025 denali
->nand
.bbt_td
= &bbt_main_descr
;
2026 denali
->nand
.bbt_md
= &bbt_mirror_descr
;
2028 /* skip the scan for now until we have OOB read and write support */
2029 denali
->nand
.options
|= NAND_USE_FLASH_BBT
| NAND_SKIP_BBTSCAN
;
2030 denali
->nand
.ecc
.mode
= NAND_ECC_HW_SYNDROME
;
2032 if (denali
->dev_info
.MLCDevice
)
2034 denali
->nand
.ecc
.layout
= &nand_oob_mlc_14bit
;
2035 denali
->nand
.ecc
.bytes
= ECC_BYTES_MLC
;
2039 denali
->nand
.ecc
.layout
= &nand_oob_slc
;
2040 denali
->nand
.ecc
.bytes
= ECC_BYTES_SLC
;
2043 /* These functions are required by the NAND core framework, otherwise,
2044 * the NAND core will assert. However, we don't need them, so we'll stub
2046 denali
->nand
.ecc
.calculate
= denali_ecc_calculate
;
2047 denali
->nand
.ecc
.correct
= denali_ecc_correct
;
2048 denali
->nand
.ecc
.hwctl
= denali_ecc_hwctl
;
2050 /* override the default read operations */
2051 denali
->nand
.ecc
.size
= denali
->mtd
.writesize
;
2052 denali
->nand
.ecc
.read_page
= denali_read_page
;
2053 denali
->nand
.ecc
.read_page_raw
= denali_read_page_raw
;
2054 denali
->nand
.ecc
.write_page
= denali_write_page
;
2055 denali
->nand
.ecc
.write_page_raw
= denali_write_page_raw
;
2056 denali
->nand
.ecc
.read_oob
= denali_read_oob
;
2057 denali
->nand
.ecc
.write_oob
= denali_write_oob
;
2058 denali
->nand
.erase_cmd
= denali_erase
;
2060 if (nand_scan_tail(&denali
->mtd
))
2066 ret
= add_mtd_device(&denali
->mtd
);
2068 printk(KERN_ERR
"Spectra: Failed to register MTD device: %d\n", ret
);
2074 denali_irq_cleanup(dev
->irq
, denali
);
2076 iounmap(denali
->flash_reg
);
2077 iounmap(denali
->flash_mem
);
2079 pci_release_regions(dev
);
2081 pci_unmap_single(dev
, denali
->buf
.dma_buf
, DENALI_BUF_SIZE
,
2082 PCI_DMA_BIDIRECTIONAL
);
2088 /* driver exit point */
2089 static void denali_pci_remove(struct pci_dev
*dev
)
2091 struct denali_nand_info
*denali
= pci_get_drvdata(dev
);
2093 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
2094 __FILE__
, __LINE__
, __func__
);
2096 nand_release(&denali
->mtd
);
2097 del_mtd_device(&denali
->mtd
);
2099 denali_irq_cleanup(dev
->irq
, denali
);
2101 iounmap(denali
->flash_reg
);
2102 iounmap(denali
->flash_mem
);
2103 pci_release_regions(dev
);
2104 pci_disable_device(dev
);
2105 pci_unmap_single(dev
, denali
->buf
.dma_buf
, DENALI_BUF_SIZE
,
2106 PCI_DMA_BIDIRECTIONAL
);
2107 pci_set_drvdata(dev
, NULL
);
2111 MODULE_DEVICE_TABLE(pci
, denali_pci_ids
);
2113 static struct pci_driver denali_pci_driver
= {
2114 .name
= DENALI_NAND_NAME
,
2115 .id_table
= denali_pci_ids
,
2116 .probe
= denali_pci_probe
,
2117 .remove
= denali_pci_remove
,
2120 static int __devinit
denali_init(void)
2122 printk(KERN_INFO
"Spectra MTD driver built on %s @ %s\n", __DATE__
, __TIME__
);
2123 return pci_register_driver(&denali_pci_driver
);
2127 static void __devexit
denali_exit(void)
2129 pci_unregister_driver(&denali_pci_driver
);
2132 module_init(denali_init
);
2133 module_exit(denali_exit
);