2 * Freescale GPMI NAND Flash Driver
4 * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 #include <linux/clk.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/mtd/partitions.h>
27 #include <linux/of_device.h>
28 #include "gpmi-nand.h"
31 /* Resource names for the GPMI NAND driver. */
32 #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
33 #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
34 #define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
36 /* add our owner bbt descriptor */
37 static uint8_t scan_ff_pattern
[] = { 0xff };
38 static struct nand_bbt_descr gpmi_bbt_descr
= {
42 .pattern
= scan_ff_pattern
46 * We may change the layout if we can get the ECC info from the datasheet,
47 * else we will use all the (page + OOB).
49 static int gpmi_ooblayout_ecc(struct mtd_info
*mtd
, int section
,
50 struct mtd_oob_region
*oobregion
)
52 struct nand_chip
*chip
= mtd_to_nand(mtd
);
53 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
54 struct bch_geometry
*geo
= &this->bch_geometry
;
59 oobregion
->offset
= 0;
60 oobregion
->length
= geo
->page_size
- mtd
->writesize
;
65 static int gpmi_ooblayout_free(struct mtd_info
*mtd
, int section
,
66 struct mtd_oob_region
*oobregion
)
68 struct nand_chip
*chip
= mtd_to_nand(mtd
);
69 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
70 struct bch_geometry
*geo
= &this->bch_geometry
;
75 /* The available oob size we have. */
76 if (geo
->page_size
< mtd
->writesize
+ mtd
->oobsize
) {
77 oobregion
->offset
= geo
->page_size
- mtd
->writesize
;
78 oobregion
->length
= mtd
->oobsize
- oobregion
->offset
;
84 static const struct mtd_ooblayout_ops gpmi_ooblayout_ops
= {
85 .ecc
= gpmi_ooblayout_ecc
,
86 .free
= gpmi_ooblayout_free
,
89 static const struct gpmi_devdata gpmi_devdata_imx23
= {
91 .bch_max_ecc_strength
= 20,
92 .max_chain_delay
= 16,
95 static const struct gpmi_devdata gpmi_devdata_imx28
= {
97 .bch_max_ecc_strength
= 20,
98 .max_chain_delay
= 16,
101 static const struct gpmi_devdata gpmi_devdata_imx6q
= {
103 .bch_max_ecc_strength
= 40,
104 .max_chain_delay
= 12,
107 static const struct gpmi_devdata gpmi_devdata_imx6sx
= {
109 .bch_max_ecc_strength
= 62,
110 .max_chain_delay
= 12,
113 static irqreturn_t
bch_irq(int irq
, void *cookie
)
115 struct gpmi_nand_data
*this = cookie
;
117 gpmi_clear_bch(this);
118 complete(&this->bch_done
);
123 * Calculate the ECC strength by hand:
124 * E : The ECC strength.
125 * G : the length of Galois Field.
126 * N : The chunk count of per page.
127 * O : the oobsize of the NAND chip.
128 * M : the metasize of per page.
132 * ------------ <= (O - M)
140 static inline int get_ecc_strength(struct gpmi_nand_data
*this)
142 struct bch_geometry
*geo
= &this->bch_geometry
;
143 struct mtd_info
*mtd
= nand_to_mtd(&this->nand
);
146 ecc_strength
= ((mtd
->oobsize
- geo
->metadata_size
) * 8)
147 / (geo
->gf_len
* geo
->ecc_chunk_count
);
149 /* We need the minor even number. */
150 return round_down(ecc_strength
, 2);
153 static inline bool gpmi_check_ecc(struct gpmi_nand_data
*this)
155 struct bch_geometry
*geo
= &this->bch_geometry
;
157 /* Do the sanity check. */
158 if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
159 /* The mx23/mx28 only support the GF13. */
160 if (geo
->gf_len
== 14)
163 return geo
->ecc_strength
<= this->devdata
->bch_max_ecc_strength
;
167 * If we can get the ECC information from the nand chip, we do not
168 * need to calculate them ourselves.
170 * We may have available oob space in this case.
172 static int set_geometry_by_ecc_info(struct gpmi_nand_data
*this)
174 struct bch_geometry
*geo
= &this->bch_geometry
;
175 struct nand_chip
*chip
= &this->nand
;
176 struct mtd_info
*mtd
= nand_to_mtd(chip
);
177 unsigned int block_mark_bit_offset
;
179 if (!(chip
->ecc_strength_ds
> 0 && chip
->ecc_step_ds
> 0))
182 switch (chip
->ecc_step_ds
) {
191 "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
192 chip
->ecc_strength_ds
, chip
->ecc_step_ds
);
195 geo
->ecc_chunk_size
= chip
->ecc_step_ds
;
196 geo
->ecc_strength
= round_up(chip
->ecc_strength_ds
, 2);
197 if (!gpmi_check_ecc(this))
200 /* Keep the C >= O */
201 if (geo
->ecc_chunk_size
< mtd
->oobsize
) {
203 "unsupported nand chip. ecc size: %d, oob size : %d\n",
204 chip
->ecc_step_ds
, mtd
->oobsize
);
208 /* The default value, see comment in the legacy_set_geometry(). */
209 geo
->metadata_size
= 10;
211 geo
->ecc_chunk_count
= mtd
->writesize
/ geo
->ecc_chunk_size
;
214 * Now, the NAND chip with 2K page(data chunk is 512byte) shows below:
217 * |<----------------------------------------------------->|
221 * |<-------------------------------------------->| D | | O' |
224 * +---+----------+-+----------+-+----------+-+----------+-+-----+
225 * | M | data |E| data |E| data |E| data |E| |
226 * +---+----------+-+----------+-+----------+-+----------+-+-----+
232 * P : the page size for BCH module.
233 * E : The ECC strength.
234 * G : the length of Galois Field.
235 * N : The chunk count of per page.
236 * M : the metasize of per page.
237 * C : the ecc chunk size, aka the "data" above.
238 * P': the nand chip's page size.
239 * O : the nand chip's oob size.
242 * The formula for P is :
245 * P = ------------ + P' + M
248 * The position of block mark moves forward in the ECC-based view
249 * of page, and the delta is:
252 * D = (---------------- + M)
255 * Please see the comment in legacy_set_geometry().
256 * With the condition C >= O , we still can get same result.
257 * So the bit position of the physical block mark within the ECC-based
258 * view of the page is :
261 geo
->page_size
= mtd
->writesize
+ geo
->metadata_size
+
262 (geo
->gf_len
* geo
->ecc_strength
* geo
->ecc_chunk_count
) / 8;
264 geo
->payload_size
= mtd
->writesize
;
266 geo
->auxiliary_status_offset
= ALIGN(geo
->metadata_size
, 4);
267 geo
->auxiliary_size
= ALIGN(geo
->metadata_size
, 4)
268 + ALIGN(geo
->ecc_chunk_count
, 4);
270 if (!this->swap_block_mark
)
274 block_mark_bit_offset
= mtd
->writesize
* 8 -
275 (geo
->ecc_strength
* geo
->gf_len
* (geo
->ecc_chunk_count
- 1)
276 + geo
->metadata_size
* 8);
278 geo
->block_mark_byte_offset
= block_mark_bit_offset
/ 8;
279 geo
->block_mark_bit_offset
= block_mark_bit_offset
% 8;
283 static int legacy_set_geometry(struct gpmi_nand_data
*this)
285 struct bch_geometry
*geo
= &this->bch_geometry
;
286 struct mtd_info
*mtd
= nand_to_mtd(&this->nand
);
287 unsigned int metadata_size
;
288 unsigned int status_size
;
289 unsigned int block_mark_bit_offset
;
292 * The size of the metadata can be changed, though we set it to 10
293 * bytes now. But it can't be too large, because we have to save
294 * enough space for BCH.
296 geo
->metadata_size
= 10;
298 /* The default for the length of Galois Field. */
301 /* The default for chunk size. */
302 geo
->ecc_chunk_size
= 512;
303 while (geo
->ecc_chunk_size
< mtd
->oobsize
) {
304 geo
->ecc_chunk_size
*= 2; /* keep C >= O */
308 geo
->ecc_chunk_count
= mtd
->writesize
/ geo
->ecc_chunk_size
;
310 /* We use the same ECC strength for all chunks. */
311 geo
->ecc_strength
= get_ecc_strength(this);
312 if (!gpmi_check_ecc(this)) {
314 "ecc strength: %d cannot be supported by the controller (%d)\n"
315 "try to use minimum ecc strength that NAND chip required\n",
317 this->devdata
->bch_max_ecc_strength
);
321 geo
->page_size
= mtd
->writesize
+ mtd
->oobsize
;
322 geo
->payload_size
= mtd
->writesize
;
325 * The auxiliary buffer contains the metadata and the ECC status. The
326 * metadata is padded to the nearest 32-bit boundary. The ECC status
327 * contains one byte for every ECC chunk, and is also padded to the
328 * nearest 32-bit boundary.
330 metadata_size
= ALIGN(geo
->metadata_size
, 4);
331 status_size
= ALIGN(geo
->ecc_chunk_count
, 4);
333 geo
->auxiliary_size
= metadata_size
+ status_size
;
334 geo
->auxiliary_status_offset
= metadata_size
;
336 if (!this->swap_block_mark
)
340 * We need to compute the byte and bit offsets of
341 * the physical block mark within the ECC-based view of the page.
343 * NAND chip with 2K page shows below:
349 * +---+----------+-+----------+-+----------+-+----------+-+
350 * | M | data |E| data |E| data |E| data |E|
351 * +---+----------+-+----------+-+----------+-+----------+-+
353 * The position of block mark moves forward in the ECC-based view
354 * of page, and the delta is:
357 * D = (---------------- + M)
360 * With the formula to compute the ECC strength, and the condition
361 * : C >= O (C is the ecc chunk size)
363 * It's easy to deduce to the following result:
365 * E * G (O - M) C - M C - M
366 * ----------- <= ------- <= -------- < ---------
372 * D = (---------------- + M) < C
375 * The above inequality means the position of block mark
376 * within the ECC-based view of the page is still in the data chunk,
377 * and it's NOT in the ECC bits of the chunk.
379 * Use the following to compute the bit position of the
380 * physical block mark within the ECC-based view of the page:
381 * (page_size - D) * 8
385 block_mark_bit_offset
= mtd
->writesize
* 8 -
386 (geo
->ecc_strength
* geo
->gf_len
* (geo
->ecc_chunk_count
- 1)
387 + geo
->metadata_size
* 8);
389 geo
->block_mark_byte_offset
= block_mark_bit_offset
/ 8;
390 geo
->block_mark_bit_offset
= block_mark_bit_offset
% 8;
394 int common_nfc_set_geometry(struct gpmi_nand_data
*this)
396 if ((of_property_read_bool(this->dev
->of_node
, "fsl,use-minimum-ecc"))
397 || legacy_set_geometry(this))
398 return set_geometry_by_ecc_info(this);
403 struct dma_chan
*get_dma_chan(struct gpmi_nand_data
*this)
405 /* We use the DMA channel 0 to access all the nand chips. */
406 return this->dma_chans
[0];
409 /* Can we use the upper's buffer directly for DMA? */
410 void prepare_data_dma(struct gpmi_nand_data
*this, enum dma_data_direction dr
)
412 struct scatterlist
*sgl
= &this->data_sgl
;
415 /* first try to map the upper buffer directly */
416 if (virt_addr_valid(this->upper_buf
) &&
417 !object_is_on_stack(this->upper_buf
)) {
418 sg_init_one(sgl
, this->upper_buf
, this->upper_len
);
419 ret
= dma_map_sg(this->dev
, sgl
, 1, dr
);
423 this->direct_dma_map_ok
= true;
428 /* We have to use our own DMA buffer. */
429 sg_init_one(sgl
, this->data_buffer_dma
, this->upper_len
);
431 if (dr
== DMA_TO_DEVICE
)
432 memcpy(this->data_buffer_dma
, this->upper_buf
, this->upper_len
);
434 dma_map_sg(this->dev
, sgl
, 1, dr
);
436 this->direct_dma_map_ok
= false;
439 /* This will be called after the DMA operation is finished. */
440 static void dma_irq_callback(void *param
)
442 struct gpmi_nand_data
*this = param
;
443 struct completion
*dma_c
= &this->dma_done
;
445 switch (this->dma_type
) {
446 case DMA_FOR_COMMAND
:
447 dma_unmap_sg(this->dev
, &this->cmd_sgl
, 1, DMA_TO_DEVICE
);
450 case DMA_FOR_READ_DATA
:
451 dma_unmap_sg(this->dev
, &this->data_sgl
, 1, DMA_FROM_DEVICE
);
452 if (this->direct_dma_map_ok
== false)
453 memcpy(this->upper_buf
, this->data_buffer_dma
,
457 case DMA_FOR_WRITE_DATA
:
458 dma_unmap_sg(this->dev
, &this->data_sgl
, 1, DMA_TO_DEVICE
);
461 case DMA_FOR_READ_ECC_PAGE
:
462 case DMA_FOR_WRITE_ECC_PAGE
:
463 /* We have to wait the BCH interrupt to finish. */
467 dev_err(this->dev
, "in wrong DMA operation.\n");
473 int start_dma_without_bch_irq(struct gpmi_nand_data
*this,
474 struct dma_async_tx_descriptor
*desc
)
476 struct completion
*dma_c
= &this->dma_done
;
477 unsigned long timeout
;
479 init_completion(dma_c
);
481 desc
->callback
= dma_irq_callback
;
482 desc
->callback_param
= this;
483 dmaengine_submit(desc
);
484 dma_async_issue_pending(get_dma_chan(this));
486 /* Wait for the interrupt from the DMA block. */
487 timeout
= wait_for_completion_timeout(dma_c
, msecs_to_jiffies(1000));
489 dev_err(this->dev
, "DMA timeout, last DMA :%d\n",
490 this->last_dma_type
);
491 gpmi_dump_info(this);
498 * This function is used in BCH reading or BCH writing pages.
499 * It will wait for the BCH interrupt as long as ONE second.
500 * Actually, we must wait for two interrupts :
501 * [1] firstly the DMA interrupt and
502 * [2] secondly the BCH interrupt.
504 int start_dma_with_bch_irq(struct gpmi_nand_data
*this,
505 struct dma_async_tx_descriptor
*desc
)
507 struct completion
*bch_c
= &this->bch_done
;
508 unsigned long timeout
;
510 /* Prepare to receive an interrupt from the BCH block. */
511 init_completion(bch_c
);
514 start_dma_without_bch_irq(this, desc
);
516 /* Wait for the interrupt from the BCH block. */
517 timeout
= wait_for_completion_timeout(bch_c
, msecs_to_jiffies(1000));
519 dev_err(this->dev
, "BCH timeout, last DMA :%d\n",
520 this->last_dma_type
);
521 gpmi_dump_info(this);
527 static int acquire_register_block(struct gpmi_nand_data
*this,
528 const char *res_name
)
530 struct platform_device
*pdev
= this->pdev
;
531 struct resources
*res
= &this->resources
;
535 r
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, res_name
);
536 p
= devm_ioremap_resource(&pdev
->dev
, r
);
540 if (!strcmp(res_name
, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME
))
542 else if (!strcmp(res_name
, GPMI_NAND_BCH_REGS_ADDR_RES_NAME
))
545 dev_err(this->dev
, "unknown resource name : %s\n", res_name
);
550 static int acquire_bch_irq(struct gpmi_nand_data
*this, irq_handler_t irq_h
)
552 struct platform_device
*pdev
= this->pdev
;
553 const char *res_name
= GPMI_NAND_BCH_INTERRUPT_RES_NAME
;
557 r
= platform_get_resource_byname(pdev
, IORESOURCE_IRQ
, res_name
);
559 dev_err(this->dev
, "Can't get resource for %s\n", res_name
);
563 err
= devm_request_irq(this->dev
, r
->start
, irq_h
, 0, res_name
, this);
565 dev_err(this->dev
, "error requesting BCH IRQ\n");
570 static void release_dma_channels(struct gpmi_nand_data
*this)
573 for (i
= 0; i
< DMA_CHANS
; i
++)
574 if (this->dma_chans
[i
]) {
575 dma_release_channel(this->dma_chans
[i
]);
576 this->dma_chans
[i
] = NULL
;
580 static int acquire_dma_channels(struct gpmi_nand_data
*this)
582 struct platform_device
*pdev
= this->pdev
;
583 struct dma_chan
*dma_chan
;
585 /* request dma channel */
586 dma_chan
= dma_request_slave_channel(&pdev
->dev
, "rx-tx");
588 dev_err(this->dev
, "Failed to request DMA channel.\n");
592 this->dma_chans
[0] = dma_chan
;
596 release_dma_channels(this);
600 static char *extra_clks_for_mx6q
[GPMI_CLK_MAX
] = {
601 "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
604 static int gpmi_get_clks(struct gpmi_nand_data
*this)
606 struct resources
*r
= &this->resources
;
607 char **extra_clks
= NULL
;
611 /* The main clock is stored in the first. */
612 r
->clock
[0] = devm_clk_get(this->dev
, "gpmi_io");
613 if (IS_ERR(r
->clock
[0])) {
614 err
= PTR_ERR(r
->clock
[0]);
618 /* Get extra clocks */
619 if (GPMI_IS_MX6(this))
620 extra_clks
= extra_clks_for_mx6q
;
624 for (i
= 1; i
< GPMI_CLK_MAX
; i
++) {
625 if (extra_clks
[i
- 1] == NULL
)
628 clk
= devm_clk_get(this->dev
, extra_clks
[i
- 1]);
637 if (GPMI_IS_MX6(this))
639 * Set the default value for the gpmi clock.
641 * If you want to use the ONFI nand which is in the
642 * Synchronous Mode, you should change the clock as you need.
644 clk_set_rate(r
->clock
[0], 22000000);
649 dev_dbg(this->dev
, "failed in finding the clocks.\n");
653 static int acquire_resources(struct gpmi_nand_data
*this)
657 ret
= acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME
);
661 ret
= acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME
);
665 ret
= acquire_bch_irq(this, bch_irq
);
669 ret
= acquire_dma_channels(this);
673 ret
= gpmi_get_clks(this);
679 release_dma_channels(this);
/* Undo acquire_resources(); devm handles registers, IRQ and clocks. */
static void release_resources(struct gpmi_nand_data *this)
{
	release_dma_channels(this);
}
689 static int init_hardware(struct gpmi_nand_data
*this)
694 * This structure contains the "safe" GPMI timing that should succeed
695 * with any NAND Flash device
696 * (although, with less-than-optimal performance).
698 struct nand_timing safe_timing
= {
699 .data_setup_in_ns
= 80,
700 .data_hold_in_ns
= 60,
701 .address_setup_in_ns
= 25,
702 .gpmi_sample_delay_in_ns
= 6,
708 /* Initialize the hardwares. */
709 ret
= gpmi_init(this);
713 this->timing
= safe_timing
;
717 static int read_page_prepare(struct gpmi_nand_data
*this,
718 void *destination
, unsigned length
,
719 void *alt_virt
, dma_addr_t alt_phys
, unsigned alt_size
,
720 void **use_virt
, dma_addr_t
*use_phys
)
722 struct device
*dev
= this->dev
;
724 if (virt_addr_valid(destination
)) {
725 dma_addr_t dest_phys
;
727 dest_phys
= dma_map_single(dev
, destination
,
728 length
, DMA_FROM_DEVICE
);
729 if (dma_mapping_error(dev
, dest_phys
)) {
730 if (alt_size
< length
) {
731 dev_err(dev
, "Alternate buffer is too small\n");
736 *use_virt
= destination
;
737 *use_phys
= dest_phys
;
738 this->direct_dma_map_ok
= true;
743 *use_virt
= alt_virt
;
744 *use_phys
= alt_phys
;
745 this->direct_dma_map_ok
= false;
749 static inline void read_page_end(struct gpmi_nand_data
*this,
750 void *destination
, unsigned length
,
751 void *alt_virt
, dma_addr_t alt_phys
, unsigned alt_size
,
752 void *used_virt
, dma_addr_t used_phys
)
754 if (this->direct_dma_map_ok
)
755 dma_unmap_single(this->dev
, used_phys
, length
, DMA_FROM_DEVICE
);
758 static inline void read_page_swap_end(struct gpmi_nand_data
*this,
759 void *destination
, unsigned length
,
760 void *alt_virt
, dma_addr_t alt_phys
, unsigned alt_size
,
761 void *used_virt
, dma_addr_t used_phys
)
763 if (!this->direct_dma_map_ok
)
764 memcpy(destination
, alt_virt
, length
);
767 static int send_page_prepare(struct gpmi_nand_data
*this,
768 const void *source
, unsigned length
,
769 void *alt_virt
, dma_addr_t alt_phys
, unsigned alt_size
,
770 const void **use_virt
, dma_addr_t
*use_phys
)
772 struct device
*dev
= this->dev
;
774 if (virt_addr_valid(source
)) {
775 dma_addr_t source_phys
;
777 source_phys
= dma_map_single(dev
, (void *)source
, length
,
779 if (dma_mapping_error(dev
, source_phys
)) {
780 if (alt_size
< length
) {
781 dev_err(dev
, "Alternate buffer is too small\n");
787 *use_phys
= source_phys
;
792 * Copy the content of the source buffer into the alternate
793 * buffer and set up the return values accordingly.
795 memcpy(alt_virt
, source
, length
);
797 *use_virt
= alt_virt
;
798 *use_phys
= alt_phys
;
802 static void send_page_end(struct gpmi_nand_data
*this,
803 const void *source
, unsigned length
,
804 void *alt_virt
, dma_addr_t alt_phys
, unsigned alt_size
,
805 const void *used_virt
, dma_addr_t used_phys
)
807 struct device
*dev
= this->dev
;
808 if (used_virt
== source
)
809 dma_unmap_single(dev
, used_phys
, length
, DMA_TO_DEVICE
);
812 static void gpmi_free_dma_buffer(struct gpmi_nand_data
*this)
814 struct device
*dev
= this->dev
;
816 if (this->page_buffer_virt
&& virt_addr_valid(this->page_buffer_virt
))
817 dma_free_coherent(dev
, this->page_buffer_size
,
818 this->page_buffer_virt
,
819 this->page_buffer_phys
);
820 kfree(this->cmd_buffer
);
821 kfree(this->data_buffer_dma
);
822 kfree(this->raw_buffer
);
824 this->cmd_buffer
= NULL
;
825 this->data_buffer_dma
= NULL
;
826 this->raw_buffer
= NULL
;
827 this->page_buffer_virt
= NULL
;
828 this->page_buffer_size
= 0;
831 /* Allocate the DMA buffers */
832 static int gpmi_alloc_dma_buffer(struct gpmi_nand_data
*this)
834 struct bch_geometry
*geo
= &this->bch_geometry
;
835 struct device
*dev
= this->dev
;
836 struct mtd_info
*mtd
= nand_to_mtd(&this->nand
);
838 /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
839 this->cmd_buffer
= kzalloc(PAGE_SIZE
, GFP_DMA
| GFP_KERNEL
);
840 if (this->cmd_buffer
== NULL
)
844 * [2] Allocate a read/write data buffer.
845 * The gpmi_alloc_dma_buffer can be called twice.
846 * We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer
847 * is called before the nand_scan_ident; and we allocate a buffer
848 * of the real NAND page size when the gpmi_alloc_dma_buffer is
849 * called after the nand_scan_ident.
851 this->data_buffer_dma
= kzalloc(mtd
->writesize
?: PAGE_SIZE
,
852 GFP_DMA
| GFP_KERNEL
);
853 if (this->data_buffer_dma
== NULL
)
857 * [3] Allocate the page buffer.
859 * Both the payload buffer and the auxiliary buffer must appear on
860 * 32-bit boundaries. We presume the size of the payload buffer is a
861 * power of two and is much larger than four, which guarantees the
862 * auxiliary buffer will appear on a 32-bit boundary.
864 this->page_buffer_size
= geo
->payload_size
+ geo
->auxiliary_size
;
865 this->page_buffer_virt
= dma_alloc_coherent(dev
, this->page_buffer_size
,
866 &this->page_buffer_phys
, GFP_DMA
);
867 if (!this->page_buffer_virt
)
870 this->raw_buffer
= kzalloc(mtd
->writesize
+ mtd
->oobsize
, GFP_KERNEL
);
871 if (!this->raw_buffer
)
874 /* Slice up the page buffer. */
875 this->payload_virt
= this->page_buffer_virt
;
876 this->payload_phys
= this->page_buffer_phys
;
877 this->auxiliary_virt
= this->payload_virt
+ geo
->payload_size
;
878 this->auxiliary_phys
= this->payload_phys
+ geo
->payload_size
;
882 gpmi_free_dma_buffer(this);
886 static void gpmi_cmd_ctrl(struct mtd_info
*mtd
, int data
, unsigned int ctrl
)
888 struct nand_chip
*chip
= mtd_to_nand(mtd
);
889 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
893 * Every operation begins with a command byte and a series of zero or
894 * more address bytes. These are distinguished by either the Address
895 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
896 * asserted. When MTD is ready to execute the command, it will deassert
897 * both latch enables.
899 * Rather than run a separate DMA operation for every single byte, we
900 * queue them up and run a single DMA operation for the entire series
901 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
903 if ((ctrl
& (NAND_ALE
| NAND_CLE
))) {
904 if (data
!= NAND_CMD_NONE
)
905 this->cmd_buffer
[this->command_length
++] = data
;
909 if (!this->command_length
)
912 ret
= gpmi_send_command(this);
914 dev_err(this->dev
, "Chip: %u, Error %d\n",
915 this->current_chip
, ret
);
917 this->command_length
= 0;
920 static int gpmi_dev_ready(struct mtd_info
*mtd
)
922 struct nand_chip
*chip
= mtd_to_nand(mtd
);
923 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
925 return gpmi_is_ready(this, this->current_chip
);
928 static void gpmi_select_chip(struct mtd_info
*mtd
, int chipnr
)
930 struct nand_chip
*chip
= mtd_to_nand(mtd
);
931 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
933 if ((this->current_chip
< 0) && (chipnr
>= 0))
935 else if ((this->current_chip
>= 0) && (chipnr
< 0))
938 this->current_chip
= chipnr
;
941 static void gpmi_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
943 struct nand_chip
*chip
= mtd_to_nand(mtd
);
944 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
946 dev_dbg(this->dev
, "len is %d\n", len
);
947 this->upper_buf
= buf
;
948 this->upper_len
= len
;
950 gpmi_read_data(this);
953 static void gpmi_write_buf(struct mtd_info
*mtd
, const uint8_t *buf
, int len
)
955 struct nand_chip
*chip
= mtd_to_nand(mtd
);
956 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
958 dev_dbg(this->dev
, "len is %d\n", len
);
959 this->upper_buf
= (uint8_t *)buf
;
960 this->upper_len
= len
;
962 gpmi_send_data(this);
965 static uint8_t gpmi_read_byte(struct mtd_info
*mtd
)
967 struct nand_chip
*chip
= mtd_to_nand(mtd
);
968 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
969 uint8_t *buf
= this->data_buffer_dma
;
971 gpmi_read_buf(mtd
, buf
, 1);
976 * Handles block mark swapping.
977 * It can be called in swapping the block mark, or swapping it back,
978 * because the the operations are the same.
980 static void block_mark_swapping(struct gpmi_nand_data
*this,
981 void *payload
, void *auxiliary
)
983 struct bch_geometry
*nfc_geo
= &this->bch_geometry
;
988 unsigned char from_data
;
989 unsigned char from_oob
;
991 if (!this->swap_block_mark
)
995 * If control arrives here, we're swapping. Make some convenience
998 bit
= nfc_geo
->block_mark_bit_offset
;
999 p
= payload
+ nfc_geo
->block_mark_byte_offset
;
1003 * Get the byte from the data area that overlays the block mark. Since
1004 * the ECC engine applies its own view to the bits in the page, the
1005 * physical block mark won't (in general) appear on a byte boundary in
1008 from_data
= (p
[0] >> bit
) | (p
[1] << (8 - bit
));
1010 /* Get the byte from the OOB. */
1016 mask
= (0x1 << bit
) - 1;
1017 p
[0] = (p
[0] & mask
) | (from_oob
<< bit
);
1020 p
[1] = (p
[1] & mask
) | (from_oob
>> (8 - bit
));
1023 static int gpmi_ecc_read_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1024 uint8_t *buf
, int oob_required
, int page
)
1026 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1027 struct bch_geometry
*nfc_geo
= &this->bch_geometry
;
1029 dma_addr_t payload_phys
;
1030 void *auxiliary_virt
;
1031 dma_addr_t auxiliary_phys
;
1033 unsigned char *status
;
1034 unsigned int max_bitflips
= 0;
1037 dev_dbg(this->dev
, "page number is : %d\n", page
);
1038 ret
= read_page_prepare(this, buf
, nfc_geo
->payload_size
,
1039 this->payload_virt
, this->payload_phys
,
1040 nfc_geo
->payload_size
,
1041 &payload_virt
, &payload_phys
);
1043 dev_err(this->dev
, "Inadequate DMA buffer\n");
1047 auxiliary_virt
= this->auxiliary_virt
;
1048 auxiliary_phys
= this->auxiliary_phys
;
1051 ret
= gpmi_read_page(this, payload_phys
, auxiliary_phys
);
1052 read_page_end(this, buf
, nfc_geo
->payload_size
,
1053 this->payload_virt
, this->payload_phys
,
1054 nfc_geo
->payload_size
,
1055 payload_virt
, payload_phys
);
1057 dev_err(this->dev
, "Error in ECC-based read: %d\n", ret
);
1061 /* handle the block mark swapping */
1062 block_mark_swapping(this, payload_virt
, auxiliary_virt
);
1064 /* Loop over status bytes, accumulating ECC status. */
1065 status
= auxiliary_virt
+ nfc_geo
->auxiliary_status_offset
;
1067 read_page_swap_end(this, buf
, nfc_geo
->payload_size
,
1068 this->payload_virt
, this->payload_phys
,
1069 nfc_geo
->payload_size
,
1070 payload_virt
, payload_phys
);
1072 for (i
= 0; i
< nfc_geo
->ecc_chunk_count
; i
++, status
++) {
1073 if ((*status
== STATUS_GOOD
) || (*status
== STATUS_ERASED
))
1076 if (*status
== STATUS_UNCORRECTABLE
) {
1077 int eccbits
= nfc_geo
->ecc_strength
* nfc_geo
->gf_len
;
1078 u8
*eccbuf
= this->raw_buffer
;
1079 int offset
, bitoffset
;
1083 /* Read ECC bytes into our internal raw_buffer */
1084 offset
= nfc_geo
->metadata_size
* 8;
1085 offset
+= ((8 * nfc_geo
->ecc_chunk_size
) + eccbits
) * (i
+ 1);
1087 bitoffset
= offset
% 8;
1088 eccbytes
= DIV_ROUND_UP(offset
+ eccbits
, 8);
1091 chip
->cmdfunc(mtd
, NAND_CMD_RNDOUT
, offset
, -1);
1092 chip
->read_buf(mtd
, eccbuf
, eccbytes
);
1095 * ECC data are not byte aligned and we may have
1096 * in-band data in the first and last byte of
1097 * eccbuf. Set non-eccbits to one so that
1098 * nand_check_erased_ecc_chunk() does not count them
1102 eccbuf
[0] |= GENMASK(bitoffset
- 1, 0);
1104 bitoffset
= (bitoffset
+ eccbits
) % 8;
1106 eccbuf
[eccbytes
- 1] |= GENMASK(7, bitoffset
);
1109 * The ECC hardware has an uncorrectable ECC status
1110 * code in case we have bitflips in an erased page. As
1111 * nothing was written into this subpage the ECC is
1112 * obviously wrong and we can not trust it. We assume
1113 * at this point that we are reading an erased page and
1114 * try to correct the bitflips in buffer up to
1115 * ecc_strength bitflips. If this is a page with random
1116 * data, we exceed this number of bitflips and have a
1117 * ECC failure. Otherwise we use the corrected buffer.
1120 /* The first block includes metadata */
1121 flips
= nand_check_erased_ecc_chunk(
1122 buf
+ i
* nfc_geo
->ecc_chunk_size
,
1123 nfc_geo
->ecc_chunk_size
,
1126 nfc_geo
->metadata_size
,
1127 nfc_geo
->ecc_strength
);
1129 flips
= nand_check_erased_ecc_chunk(
1130 buf
+ i
* nfc_geo
->ecc_chunk_size
,
1131 nfc_geo
->ecc_chunk_size
,
1134 nfc_geo
->ecc_strength
);
1138 max_bitflips
= max_t(unsigned int, max_bitflips
,
1140 mtd
->ecc_stats
.corrected
+= flips
;
1144 mtd
->ecc_stats
.failed
++;
1148 mtd
->ecc_stats
.corrected
+= *status
;
1149 max_bitflips
= max_t(unsigned int, max_bitflips
, *status
);
1154 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
1155 * for details about our policy for delivering the OOB.
1157 * We fill the caller's buffer with set bits, and then copy the
1158 * block mark to th caller's buffer. Note that, if block mark
1159 * swapping was necessary, it has already been done, so we can
1160 * rely on the first byte of the auxiliary buffer to contain
1163 memset(chip
->oob_poi
, ~0, mtd
->oobsize
);
1164 chip
->oob_poi
[0] = ((uint8_t *) auxiliary_virt
)[0];
1167 return max_bitflips
;
1170 /* Fake a virtual small page for the subpage read */
1171 static int gpmi_ecc_read_subpage(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1172 uint32_t offs
, uint32_t len
, uint8_t *buf
, int page
)
1174 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1175 void __iomem
*bch_regs
= this->resources
.bch_regs
;
1176 struct bch_geometry old_geo
= this->bch_geometry
;
1177 struct bch_geometry
*geo
= &this->bch_geometry
;
1178 int size
= chip
->ecc
.size
; /* ECC chunk size */
1179 int meta
, n
, page_size
;
1180 u32 r1_old
, r2_old
, r1_new
, r2_new
;
1181 unsigned int max_bitflips
;
1182 int first
, last
, marker_pos
;
1183 int ecc_parity_size
;
1185 int old_swap_block_mark
= this->swap_block_mark
;
1187 /* The size of ECC parity */
1188 ecc_parity_size
= geo
->gf_len
* geo
->ecc_strength
/ 8;
1190 /* Align it with the chunk size */
1191 first
= offs
/ size
;
1192 last
= (offs
+ len
- 1) / size
;
1194 if (this->swap_block_mark
) {
1196 * Find the chunk which contains the Block Marker.
1197 * If this chunk is in the range of [first, last],
1198 * we have to read out the whole page.
1199 * Why? since we had swapped the data at the position of Block
1200 * Marker to the metadata which is bound with the chunk 0.
1202 marker_pos
= geo
->block_mark_byte_offset
/ size
;
1203 if (last
>= marker_pos
&& first
<= marker_pos
) {
1205 "page:%d, first:%d, last:%d, marker at:%d\n",
1206 page
, first
, last
, marker_pos
);
1207 return gpmi_ecc_read_page(mtd
, chip
, buf
, 0, page
);
1211 meta
= geo
->metadata_size
;
1213 col
= meta
+ (size
+ ecc_parity_size
) * first
;
1214 chip
->cmdfunc(mtd
, NAND_CMD_RNDOUT
, col
, -1);
1217 buf
= buf
+ first
* size
;
1220 /* Save the old environment */
1221 r1_old
= r1_new
= readl(bch_regs
+ HW_BCH_FLASH0LAYOUT0
);
1222 r2_old
= r2_new
= readl(bch_regs
+ HW_BCH_FLASH0LAYOUT1
);
1224 /* change the BCH registers and bch_geometry{} */
1225 n
= last
- first
+ 1;
1226 page_size
= meta
+ (size
+ ecc_parity_size
) * n
;
1228 r1_new
&= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS
|
1229 BM_BCH_FLASH0LAYOUT0_META_SIZE
);
1230 r1_new
|= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n
- 1)
1231 | BF_BCH_FLASH0LAYOUT0_META_SIZE(meta
);
1232 writel(r1_new
, bch_regs
+ HW_BCH_FLASH0LAYOUT0
);
1234 r2_new
&= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE
;
1235 r2_new
|= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size
);
1236 writel(r2_new
, bch_regs
+ HW_BCH_FLASH0LAYOUT1
);
1238 geo
->ecc_chunk_count
= n
;
1239 geo
->payload_size
= n
* size
;
1240 geo
->page_size
= page_size
;
1241 geo
->auxiliary_status_offset
= ALIGN(meta
, 4);
1243 dev_dbg(this->dev
, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
1244 page
, offs
, len
, col
, first
, n
, page_size
);
1246 /* Read the subpage now */
1247 this->swap_block_mark
= false;
1248 max_bitflips
= gpmi_ecc_read_page(mtd
, chip
, buf
, 0, page
);
1251 writel(r1_old
, bch_regs
+ HW_BCH_FLASH0LAYOUT0
);
1252 writel(r2_old
, bch_regs
+ HW_BCH_FLASH0LAYOUT1
);
1253 this->bch_geometry
= old_geo
;
1254 this->swap_block_mark
= old_swap_block_mark
;
1256 return max_bitflips
;
1259 static int gpmi_ecc_write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1260 const uint8_t *buf
, int oob_required
, int page
)
1262 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1263 struct bch_geometry
*nfc_geo
= &this->bch_geometry
;
1264 const void *payload_virt
;
1265 dma_addr_t payload_phys
;
1266 const void *auxiliary_virt
;
1267 dma_addr_t auxiliary_phys
;
1270 dev_dbg(this->dev
, "ecc write page.\n");
1271 if (this->swap_block_mark
) {
1273 * If control arrives here, we're doing block mark swapping.
1274 * Since we can't modify the caller's buffers, we must copy them
1277 memcpy(this->payload_virt
, buf
, mtd
->writesize
);
1278 payload_virt
= this->payload_virt
;
1279 payload_phys
= this->payload_phys
;
1281 memcpy(this->auxiliary_virt
, chip
->oob_poi
,
1282 nfc_geo
->auxiliary_size
);
1283 auxiliary_virt
= this->auxiliary_virt
;
1284 auxiliary_phys
= this->auxiliary_phys
;
1286 /* Handle block mark swapping. */
1287 block_mark_swapping(this,
1288 (void *)payload_virt
, (void *)auxiliary_virt
);
1291 * If control arrives here, we're not doing block mark swapping,
1292 * so we can to try and use the caller's buffers.
1294 ret
= send_page_prepare(this,
1295 buf
, mtd
->writesize
,
1296 this->payload_virt
, this->payload_phys
,
1297 nfc_geo
->payload_size
,
1298 &payload_virt
, &payload_phys
);
1300 dev_err(this->dev
, "Inadequate payload DMA buffer\n");
1304 ret
= send_page_prepare(this,
1305 chip
->oob_poi
, mtd
->oobsize
,
1306 this->auxiliary_virt
, this->auxiliary_phys
,
1307 nfc_geo
->auxiliary_size
,
1308 &auxiliary_virt
, &auxiliary_phys
);
1310 dev_err(this->dev
, "Inadequate auxiliary DMA buffer\n");
1311 goto exit_auxiliary
;
1316 ret
= gpmi_send_page(this, payload_phys
, auxiliary_phys
);
1318 dev_err(this->dev
, "Error in ECC-based write: %d\n", ret
);
1320 if (!this->swap_block_mark
) {
1321 send_page_end(this, chip
->oob_poi
, mtd
->oobsize
,
1322 this->auxiliary_virt
, this->auxiliary_phys
,
1323 nfc_geo
->auxiliary_size
,
1324 auxiliary_virt
, auxiliary_phys
);
1326 send_page_end(this, buf
, mtd
->writesize
,
1327 this->payload_virt
, this->payload_phys
,
1328 nfc_geo
->payload_size
,
1329 payload_virt
, payload_phys
);
/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on two questions:
 *
 * 1) Are we doing a "raw" read, or an ECC-based read?
 *
 * 2) Are we using block mark swapping or transcription?
 *
 * There are four cases, illustrated by the following Karnaugh map:
 *
 *                    |           Raw           |         ECC-based       |
 *       -------------+-------------------------+-------------------------+
 *                    | Read the conventional   |                         |
 *                    | OOB at the end of the   |                         |
 *       Swapping     | page and return it. It  |                         |
 *                    | contains exactly what   |                         |
 *                    | we want.                | Read the block mark and |
 *       -------------+-------------------------+ return it in a buffer   |
 *                    | Read the conventional   | full of set bits.       |
 *                    | OOB at the end of the   |                         |
 *                    | page and also the block |                         |
 *       Transcribing | mark in the metadata.   |                         |
 *                    | Copy the block mark     |                         |
 *                    | into the first byte of  |                         |
 *                    | the OOB.                |                         |
 *       -------------+-------------------------+-------------------------+
 *
 * Note that we break rule #4 in the Transcribing/Raw case because we're not
 * giving an accurate view of the actual, physical bytes in the page (we're
 * overwriting the block mark). That's OK because it's more important to follow
 * rule #3.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 */
1395 static int gpmi_ecc_read_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1398 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1400 dev_dbg(this->dev
, "page number is %d\n", page
);
1401 /* clear the OOB buffer */
1402 memset(chip
->oob_poi
, ~0, mtd
->oobsize
);
1404 /* Read out the conventional OOB. */
1405 chip
->cmdfunc(mtd
, NAND_CMD_READ0
, mtd
->writesize
, page
);
1406 chip
->read_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
1409 * Now, we want to make sure the block mark is correct. In the
1410 * non-transcribing case (!GPMI_IS_MX23()), we already have it.
1411 * Otherwise, we need to explicitly read it.
1413 if (GPMI_IS_MX23(this)) {
1414 /* Read the block mark into the first byte of the OOB buffer. */
1415 chip
->cmdfunc(mtd
, NAND_CMD_READ0
, 0, page
);
1416 chip
->oob_poi
[0] = chip
->read_byte(mtd
);
1423 gpmi_ecc_write_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
, int page
)
1425 struct mtd_oob_region of
= { };
1428 /* Do we have available oob area? */
1429 mtd_ooblayout_free(mtd
, 0, &of
);
1433 if (!nand_is_slc(chip
))
1436 chip
->cmdfunc(mtd
, NAND_CMD_SEQIN
, mtd
->writesize
+ of
.offset
, page
);
1437 chip
->write_buf(mtd
, chip
->oob_poi
+ of
.offset
, of
.length
);
1438 chip
->cmdfunc(mtd
, NAND_CMD_PAGEPROG
, -1, -1);
1440 status
= chip
->waitfunc(mtd
, chip
);
1441 return status
& NAND_STATUS_FAIL
? -EIO
: 0;
1445 * This function reads a NAND page without involving the ECC engine (no HW
1447 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1448 * inline (interleaved with payload DATA), and do not align data chunk on
1450 * We thus need to take care moving the payload data and ECC bits stored in the
1451 * page into the provided buffers, which is why we're using gpmi_copy_bits.
1453 * See set_geometry_by_ecc_info inline comments to have a full description
1454 * of the layout used by the GPMI controller.
1456 static int gpmi_ecc_read_page_raw(struct mtd_info
*mtd
,
1457 struct nand_chip
*chip
, uint8_t *buf
,
1458 int oob_required
, int page
)
1460 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1461 struct bch_geometry
*nfc_geo
= &this->bch_geometry
;
1462 int eccsize
= nfc_geo
->ecc_chunk_size
;
1463 int eccbits
= nfc_geo
->ecc_strength
* nfc_geo
->gf_len
;
1464 u8
*tmp_buf
= this->raw_buffer
;
1467 size_t oob_byte_off
;
1468 uint8_t *oob
= chip
->oob_poi
;
1471 chip
->read_buf(mtd
, tmp_buf
,
1472 mtd
->writesize
+ mtd
->oobsize
);
1475 * If required, swap the bad block marker and the data stored in the
1476 * metadata section, so that we don't wrongly consider a block as bad.
1478 * See the layout description for a detailed explanation on why this
1481 if (this->swap_block_mark
) {
1482 u8 swap
= tmp_buf
[0];
1484 tmp_buf
[0] = tmp_buf
[mtd
->writesize
];
1485 tmp_buf
[mtd
->writesize
] = swap
;
1489 * Copy the metadata section into the oob buffer (this section is
1490 * guaranteed to be aligned on a byte boundary).
1493 memcpy(oob
, tmp_buf
, nfc_geo
->metadata_size
);
1495 oob_bit_off
= nfc_geo
->metadata_size
* 8;
1496 src_bit_off
= oob_bit_off
;
1498 /* Extract interleaved payload data and ECC bits */
1499 for (step
= 0; step
< nfc_geo
->ecc_chunk_count
; step
++) {
1501 gpmi_copy_bits(buf
, step
* eccsize
* 8,
1502 tmp_buf
, src_bit_off
,
1504 src_bit_off
+= eccsize
* 8;
1506 /* Align last ECC block to align a byte boundary */
1507 if (step
== nfc_geo
->ecc_chunk_count
- 1 &&
1508 (oob_bit_off
+ eccbits
) % 8)
1509 eccbits
+= 8 - ((oob_bit_off
+ eccbits
) % 8);
1512 gpmi_copy_bits(oob
, oob_bit_off
,
1513 tmp_buf
, src_bit_off
,
1516 src_bit_off
+= eccbits
;
1517 oob_bit_off
+= eccbits
;
1521 oob_byte_off
= oob_bit_off
/ 8;
1523 if (oob_byte_off
< mtd
->oobsize
)
1524 memcpy(oob
+ oob_byte_off
,
1525 tmp_buf
+ mtd
->writesize
+ oob_byte_off
,
1526 mtd
->oobsize
- oob_byte_off
);
1533 * This function writes a NAND page without involving the ECC engine (no HW
1535 * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1536 * inline (interleaved with payload DATA), and do not align data chunk on
1538 * We thus need to take care moving the OOB area at the right place in the
1539 * final page, which is why we're using gpmi_copy_bits.
1541 * See set_geometry_by_ecc_info inline comments to have a full description
1542 * of the layout used by the GPMI controller.
1544 static int gpmi_ecc_write_page_raw(struct mtd_info
*mtd
,
1545 struct nand_chip
*chip
,
1547 int oob_required
, int page
)
1549 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1550 struct bch_geometry
*nfc_geo
= &this->bch_geometry
;
1551 int eccsize
= nfc_geo
->ecc_chunk_size
;
1552 int eccbits
= nfc_geo
->ecc_strength
* nfc_geo
->gf_len
;
1553 u8
*tmp_buf
= this->raw_buffer
;
1554 uint8_t *oob
= chip
->oob_poi
;
1557 size_t oob_byte_off
;
1561 * Initialize all bits to 1 in case we don't have a buffer for the
1562 * payload or oob data in order to leave unspecified bits of data
1563 * to their initial state.
1565 if (!buf
|| !oob_required
)
1566 memset(tmp_buf
, 0xff, mtd
->writesize
+ mtd
->oobsize
);
1569 * First copy the metadata section (stored in oob buffer) at the
1570 * beginning of the page, as imposed by the GPMI layout.
1572 memcpy(tmp_buf
, oob
, nfc_geo
->metadata_size
);
1573 oob_bit_off
= nfc_geo
->metadata_size
* 8;
1574 dst_bit_off
= oob_bit_off
;
1576 /* Interleave payload data and ECC bits */
1577 for (step
= 0; step
< nfc_geo
->ecc_chunk_count
; step
++) {
1579 gpmi_copy_bits(tmp_buf
, dst_bit_off
,
1580 buf
, step
* eccsize
* 8, eccsize
* 8);
1581 dst_bit_off
+= eccsize
* 8;
1583 /* Align last ECC block to align a byte boundary */
1584 if (step
== nfc_geo
->ecc_chunk_count
- 1 &&
1585 (oob_bit_off
+ eccbits
) % 8)
1586 eccbits
+= 8 - ((oob_bit_off
+ eccbits
) % 8);
1589 gpmi_copy_bits(tmp_buf
, dst_bit_off
,
1590 oob
, oob_bit_off
, eccbits
);
1592 dst_bit_off
+= eccbits
;
1593 oob_bit_off
+= eccbits
;
1596 oob_byte_off
= oob_bit_off
/ 8;
1598 if (oob_required
&& oob_byte_off
< mtd
->oobsize
)
1599 memcpy(tmp_buf
+ mtd
->writesize
+ oob_byte_off
,
1600 oob
+ oob_byte_off
, mtd
->oobsize
- oob_byte_off
);
1603 * If required, swap the bad block marker and the first byte of the
1604 * metadata section, so that we don't modify the bad block marker.
1606 * See the layout description for a detailed explanation on why this
1609 if (this->swap_block_mark
) {
1610 u8 swap
= tmp_buf
[0];
1612 tmp_buf
[0] = tmp_buf
[mtd
->writesize
];
1613 tmp_buf
[mtd
->writesize
] = swap
;
1616 chip
->write_buf(mtd
, tmp_buf
, mtd
->writesize
+ mtd
->oobsize
);
1621 static int gpmi_ecc_read_oob_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1624 chip
->cmdfunc(mtd
, NAND_CMD_READ0
, 0, page
);
1626 return gpmi_ecc_read_page_raw(mtd
, chip
, NULL
, 1, page
);
1629 static int gpmi_ecc_write_oob_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1632 chip
->cmdfunc(mtd
, NAND_CMD_SEQIN
, 0, page
);
1634 return gpmi_ecc_write_page_raw(mtd
, chip
, NULL
, 1, page
);
1637 static int gpmi_block_markbad(struct mtd_info
*mtd
, loff_t ofs
)
1639 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1640 struct gpmi_nand_data
*this = nand_get_controller_data(chip
);
1642 uint8_t *block_mark
;
1643 int column
, page
, status
, chipnr
;
1645 chipnr
= (int)(ofs
>> chip
->chip_shift
);
1646 chip
->select_chip(mtd
, chipnr
);
1648 column
= !GPMI_IS_MX23(this) ? mtd
->writesize
: 0;
1650 /* Write the block mark. */
1651 block_mark
= this->data_buffer_dma
;
1652 block_mark
[0] = 0; /* bad block marker */
1654 /* Shift to get page */
1655 page
= (int)(ofs
>> chip
->page_shift
);
1657 chip
->cmdfunc(mtd
, NAND_CMD_SEQIN
, column
, page
);
1658 chip
->write_buf(mtd
, block_mark
, 1);
1659 chip
->cmdfunc(mtd
, NAND_CMD_PAGEPROG
, -1, -1);
1661 status
= chip
->waitfunc(mtd
, chip
);
1662 if (status
& NAND_STATUS_FAIL
)
1665 chip
->select_chip(mtd
, -1);
1670 static int nand_boot_set_geometry(struct gpmi_nand_data
*this)
1672 struct boot_rom_geometry
*geometry
= &this->rom_geometry
;
1675 * Set the boot block stride size.
1677 * In principle, we should be reading this from the OTP bits, since
1678 * that's where the ROM is going to get it. In fact, we don't have any
1679 * way to read the OTP bits, so we go with the default and hope for the
1682 geometry
->stride_size_in_pages
= 64;
1685 * Set the search area stride exponent.
1687 * In principle, we should be reading this from the OTP bits, since
1688 * that's where the ROM is going to get it. In fact, we don't have any
1689 * way to read the OTP bits, so we go with the default and hope for the
1692 geometry
->search_area_stride_exponent
= 2;
1696 static const char *fingerprint
= "STMP";
1697 static int mx23_check_transcription_stamp(struct gpmi_nand_data
*this)
1699 struct boot_rom_geometry
*rom_geo
= &this->rom_geometry
;
1700 struct device
*dev
= this->dev
;
1701 struct nand_chip
*chip
= &this->nand
;
1702 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1703 unsigned int search_area_size_in_strides
;
1704 unsigned int stride
;
1706 uint8_t *buffer
= chip
->buffers
->databuf
;
1707 int saved_chip_number
;
1708 int found_an_ncb_fingerprint
= false;
1710 /* Compute the number of strides in a search area. */
1711 search_area_size_in_strides
= 1 << rom_geo
->search_area_stride_exponent
;
1713 saved_chip_number
= this->current_chip
;
1714 chip
->select_chip(mtd
, 0);
1717 * Loop through the first search area, looking for the NCB fingerprint.
1719 dev_dbg(dev
, "Scanning for an NCB fingerprint...\n");
1721 for (stride
= 0; stride
< search_area_size_in_strides
; stride
++) {
1722 /* Compute the page addresses. */
1723 page
= stride
* rom_geo
->stride_size_in_pages
;
1725 dev_dbg(dev
, "Looking for a fingerprint in page 0x%x\n", page
);
1728 * Read the NCB fingerprint. The fingerprint is four bytes long
1729 * and starts in the 12th byte of the page.
1731 chip
->cmdfunc(mtd
, NAND_CMD_READ0
, 12, page
);
1732 chip
->read_buf(mtd
, buffer
, strlen(fingerprint
));
1734 /* Look for the fingerprint. */
1735 if (!memcmp(buffer
, fingerprint
, strlen(fingerprint
))) {
1736 found_an_ncb_fingerprint
= true;
1742 chip
->select_chip(mtd
, saved_chip_number
);
1744 if (found_an_ncb_fingerprint
)
1745 dev_dbg(dev
, "\tFound a fingerprint\n");
1747 dev_dbg(dev
, "\tNo fingerprint found\n");
1748 return found_an_ncb_fingerprint
;
1751 /* Writes a transcription stamp. */
1752 static int mx23_write_transcription_stamp(struct gpmi_nand_data
*this)
1754 struct device
*dev
= this->dev
;
1755 struct boot_rom_geometry
*rom_geo
= &this->rom_geometry
;
1756 struct nand_chip
*chip
= &this->nand
;
1757 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1758 unsigned int block_size_in_pages
;
1759 unsigned int search_area_size_in_strides
;
1760 unsigned int search_area_size_in_pages
;
1761 unsigned int search_area_size_in_blocks
;
1763 unsigned int stride
;
1765 uint8_t *buffer
= chip
->buffers
->databuf
;
1766 int saved_chip_number
;
1769 /* Compute the search area geometry. */
1770 block_size_in_pages
= mtd
->erasesize
/ mtd
->writesize
;
1771 search_area_size_in_strides
= 1 << rom_geo
->search_area_stride_exponent
;
1772 search_area_size_in_pages
= search_area_size_in_strides
*
1773 rom_geo
->stride_size_in_pages
;
1774 search_area_size_in_blocks
=
1775 (search_area_size_in_pages
+ (block_size_in_pages
- 1)) /
1776 block_size_in_pages
;
1778 dev_dbg(dev
, "Search Area Geometry :\n");
1779 dev_dbg(dev
, "\tin Blocks : %u\n", search_area_size_in_blocks
);
1780 dev_dbg(dev
, "\tin Strides: %u\n", search_area_size_in_strides
);
1781 dev_dbg(dev
, "\tin Pages : %u\n", search_area_size_in_pages
);
1783 /* Select chip 0. */
1784 saved_chip_number
= this->current_chip
;
1785 chip
->select_chip(mtd
, 0);
1787 /* Loop over blocks in the first search area, erasing them. */
1788 dev_dbg(dev
, "Erasing the search area...\n");
1790 for (block
= 0; block
< search_area_size_in_blocks
; block
++) {
1791 /* Compute the page address. */
1792 page
= block
* block_size_in_pages
;
1794 /* Erase this block. */
1795 dev_dbg(dev
, "\tErasing block 0x%x\n", block
);
1796 chip
->cmdfunc(mtd
, NAND_CMD_ERASE1
, -1, page
);
1797 chip
->cmdfunc(mtd
, NAND_CMD_ERASE2
, -1, -1);
1799 /* Wait for the erase to finish. */
1800 status
= chip
->waitfunc(mtd
, chip
);
1801 if (status
& NAND_STATUS_FAIL
)
1802 dev_err(dev
, "[%s] Erase failed.\n", __func__
);
1805 /* Write the NCB fingerprint into the page buffer. */
1806 memset(buffer
, ~0, mtd
->writesize
);
1807 memcpy(buffer
+ 12, fingerprint
, strlen(fingerprint
));
1809 /* Loop through the first search area, writing NCB fingerprints. */
1810 dev_dbg(dev
, "Writing NCB fingerprints...\n");
1811 for (stride
= 0; stride
< search_area_size_in_strides
; stride
++) {
1812 /* Compute the page addresses. */
1813 page
= stride
* rom_geo
->stride_size_in_pages
;
1815 /* Write the first page of the current stride. */
1816 dev_dbg(dev
, "Writing an NCB fingerprint in page 0x%x\n", page
);
1817 chip
->cmdfunc(mtd
, NAND_CMD_SEQIN
, 0x00, page
);
1818 chip
->ecc
.write_page_raw(mtd
, chip
, buffer
, 0, page
);
1819 chip
->cmdfunc(mtd
, NAND_CMD_PAGEPROG
, -1, -1);
1821 /* Wait for the write to finish. */
1822 status
= chip
->waitfunc(mtd
, chip
);
1823 if (status
& NAND_STATUS_FAIL
)
1824 dev_err(dev
, "[%s] Write failed.\n", __func__
);
1827 /* Deselect chip 0. */
1828 chip
->select_chip(mtd
, saved_chip_number
);
1832 static int mx23_boot_init(struct gpmi_nand_data
*this)
1834 struct device
*dev
= this->dev
;
1835 struct nand_chip
*chip
= &this->nand
;
1836 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1837 unsigned int block_count
;
1846 * If control arrives here, we can't use block mark swapping, which
1847 * means we're forced to use transcription. First, scan for the
1848 * transcription stamp. If we find it, then we don't have to do
1849 * anything -- the block marks are already transcribed.
1851 if (mx23_check_transcription_stamp(this))
1855 * If control arrives here, we couldn't find a transcription stamp, so
1856 * so we presume the block marks are in the conventional location.
1858 dev_dbg(dev
, "Transcribing bad block marks...\n");
1860 /* Compute the number of blocks in the entire medium. */
1861 block_count
= chip
->chipsize
>> chip
->phys_erase_shift
;
1864 * Loop over all the blocks in the medium, transcribing block marks as
1867 for (block
= 0; block
< block_count
; block
++) {
1869 * Compute the chip, page and byte addresses for this block's
1870 * conventional mark.
1872 chipnr
= block
>> (chip
->chip_shift
- chip
->phys_erase_shift
);
1873 page
= block
<< (chip
->phys_erase_shift
- chip
->page_shift
);
1874 byte
= block
<< chip
->phys_erase_shift
;
1876 /* Send the command to read the conventional block mark. */
1877 chip
->select_chip(mtd
, chipnr
);
1878 chip
->cmdfunc(mtd
, NAND_CMD_READ0
, mtd
->writesize
, page
);
1879 block_mark
= chip
->read_byte(mtd
);
1880 chip
->select_chip(mtd
, -1);
1883 * Check if the block is marked bad. If so, we need to mark it
1884 * again, but this time the result will be a mark in the
1885 * location where we transcribe block marks.
1887 if (block_mark
!= 0xff) {
1888 dev_dbg(dev
, "Transcribing mark in block %u\n", block
);
1889 ret
= chip
->block_markbad(mtd
, byte
);
1892 "Failed to mark block bad with ret %d\n",
1897 /* Write the stamp that indicates we've transcribed the block marks. */
1898 mx23_write_transcription_stamp(this);
static int nand_boot_init(struct gpmi_nand_data *this)
{
	nand_boot_set_geometry(this);

	/* This is ROM arch-specific initialization before the BBT scanning. */
	if (GPMI_IS_MX23(this))
		return mx23_boot_init(this);
	return 0;
}
1912 static int gpmi_set_geometry(struct gpmi_nand_data
*this)
1916 /* Free the temporary DMA memory for reading ID. */
1917 gpmi_free_dma_buffer(this);
1919 /* Set up the NFC geometry which is used by BCH. */
1920 ret
= bch_set_geometry(this);
1922 dev_err(this->dev
, "Error setting BCH geometry : %d\n", ret
);
1926 /* Alloc the new DMA buffers according to the pagesize and oobsize */
1927 return gpmi_alloc_dma_buffer(this);
1930 static void gpmi_nand_exit(struct gpmi_nand_data
*this)
1932 nand_release(nand_to_mtd(&this->nand
));
1933 gpmi_free_dma_buffer(this);
1936 static int gpmi_init_last(struct gpmi_nand_data
*this)
1938 struct nand_chip
*chip
= &this->nand
;
1939 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1940 struct nand_ecc_ctrl
*ecc
= &chip
->ecc
;
1941 struct bch_geometry
*bch_geo
= &this->bch_geometry
;
1944 /* Set up the medium geometry */
1945 ret
= gpmi_set_geometry(this);
1949 /* Init the nand_ecc_ctrl{} */
1950 ecc
->read_page
= gpmi_ecc_read_page
;
1951 ecc
->write_page
= gpmi_ecc_write_page
;
1952 ecc
->read_oob
= gpmi_ecc_read_oob
;
1953 ecc
->write_oob
= gpmi_ecc_write_oob
;
1954 ecc
->read_page_raw
= gpmi_ecc_read_page_raw
;
1955 ecc
->write_page_raw
= gpmi_ecc_write_page_raw
;
1956 ecc
->read_oob_raw
= gpmi_ecc_read_oob_raw
;
1957 ecc
->write_oob_raw
= gpmi_ecc_write_oob_raw
;
1958 ecc
->mode
= NAND_ECC_HW
;
1959 ecc
->size
= bch_geo
->ecc_chunk_size
;
1960 ecc
->strength
= bch_geo
->ecc_strength
;
1961 mtd_set_ooblayout(mtd
, &gpmi_ooblayout_ops
);
1964 * We only enable the subpage read when:
1965 * (1) the chip is imx6, and
1966 * (2) the size of the ECC parity is byte aligned.
1968 if (GPMI_IS_MX6(this) &&
1969 ((bch_geo
->gf_len
* bch_geo
->ecc_strength
) % 8) == 0) {
1970 ecc
->read_subpage
= gpmi_ecc_read_subpage
;
1971 chip
->options
|= NAND_SUBPAGE_READ
;
1975 * Can we enable the extra features? such as EDO or Sync mode.
1977 * We do not check the return value now. That's means if we fail in
1978 * enable the extra features, we still can run in the normal way.
1980 gpmi_extra_init(this);
1985 static int gpmi_nand_init(struct gpmi_nand_data
*this)
1987 struct nand_chip
*chip
= &this->nand
;
1988 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1991 /* init current chip */
1992 this->current_chip
= -1;
1994 /* init the MTD data structures */
1995 mtd
->name
= "gpmi-nand";
1996 mtd
->dev
.parent
= this->dev
;
1998 /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
1999 nand_set_controller_data(chip
, this);
2000 nand_set_flash_node(chip
, this->pdev
->dev
.of_node
);
2001 chip
->select_chip
= gpmi_select_chip
;
2002 chip
->cmd_ctrl
= gpmi_cmd_ctrl
;
2003 chip
->dev_ready
= gpmi_dev_ready
;
2004 chip
->read_byte
= gpmi_read_byte
;
2005 chip
->read_buf
= gpmi_read_buf
;
2006 chip
->write_buf
= gpmi_write_buf
;
2007 chip
->badblock_pattern
= &gpmi_bbt_descr
;
2008 chip
->block_markbad
= gpmi_block_markbad
;
2009 chip
->options
|= NAND_NO_SUBPAGE_WRITE
;
2011 /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
2012 this->swap_block_mark
= !GPMI_IS_MX23(this);
2015 * Allocate a temporary DMA buffer for reading ID in the
2016 * nand_scan_ident().
2018 this->bch_geometry
.payload_size
= 1024;
2019 this->bch_geometry
.auxiliary_size
= 128;
2020 ret
= gpmi_alloc_dma_buffer(this);
2024 ret
= nand_scan_ident(mtd
, GPMI_IS_MX6(this) ? 2 : 1, NULL
);
2028 if (chip
->bbt_options
& NAND_BBT_USE_FLASH
) {
2029 chip
->bbt_options
|= NAND_BBT_NO_OOB
;
2031 if (of_property_read_bool(this->dev
->of_node
,
2032 "fsl,no-blockmark-swap"))
2033 this->swap_block_mark
= false;
2035 dev_dbg(this->dev
, "Blockmark swapping %sabled\n",
2036 this->swap_block_mark
? "en" : "dis");
2038 ret
= gpmi_init_last(this);
2042 chip
->options
|= NAND_SKIP_BBTSCAN
;
2043 ret
= nand_scan_tail(mtd
);
2047 ret
= nand_boot_init(this);
2050 ret
= chip
->scan_bbt(mtd
);
2054 ret
= mtd_device_register(mtd
, NULL
, 0);
2060 gpmi_nand_exit(this);
2064 static const struct of_device_id gpmi_nand_id_table
[] = {
2066 .compatible
= "fsl,imx23-gpmi-nand",
2067 .data
= &gpmi_devdata_imx23
,
2069 .compatible
= "fsl,imx28-gpmi-nand",
2070 .data
= &gpmi_devdata_imx28
,
2072 .compatible
= "fsl,imx6q-gpmi-nand",
2073 .data
= &gpmi_devdata_imx6q
,
2075 .compatible
= "fsl,imx6sx-gpmi-nand",
2076 .data
= &gpmi_devdata_imx6sx
,
2079 MODULE_DEVICE_TABLE(of
, gpmi_nand_id_table
);
2081 static int gpmi_nand_probe(struct platform_device
*pdev
)
2083 struct gpmi_nand_data
*this;
2084 const struct of_device_id
*of_id
;
2087 this = devm_kzalloc(&pdev
->dev
, sizeof(*this), GFP_KERNEL
);
2091 of_id
= of_match_device(gpmi_nand_id_table
, &pdev
->dev
);
2093 this->devdata
= of_id
->data
;
2095 dev_err(&pdev
->dev
, "Failed to find the right device id.\n");
2099 platform_set_drvdata(pdev
, this);
2101 this->dev
= &pdev
->dev
;
2103 ret
= acquire_resources(this);
2105 goto exit_acquire_resources
;
2107 ret
= init_hardware(this);
2111 ret
= gpmi_nand_init(this);
2115 dev_info(this->dev
, "driver registered.\n");
2120 release_resources(this);
2121 exit_acquire_resources
:
static int gpmi_nand_remove(struct platform_device *pdev)
{
	struct gpmi_nand_data *this = platform_get_drvdata(pdev);

	gpmi_nand_exit(this);
	release_resources(this);
	return 0;
}
2135 #ifdef CONFIG_PM_SLEEP
2136 static int gpmi_pm_suspend(struct device
*dev
)
2138 struct gpmi_nand_data
*this = dev_get_drvdata(dev
);
2140 release_dma_channels(this);
2144 static int gpmi_pm_resume(struct device
*dev
)
2146 struct gpmi_nand_data
*this = dev_get_drvdata(dev
);
2149 ret
= acquire_dma_channels(this);
2153 /* re-init the GPMI registers */
2154 this->flags
&= ~GPMI_TIMING_INIT_OK
;
2155 ret
= gpmi_init(this);
2157 dev_err(this->dev
, "Error setting GPMI : %d\n", ret
);
2161 /* re-init the BCH registers */
2162 ret
= bch_set_geometry(this);
2164 dev_err(this->dev
, "Error setting BCH : %d\n", ret
);
2168 /* re-init others */
2169 gpmi_extra_init(this);
2173 #endif /* CONFIG_PM_SLEEP */
2175 static const struct dev_pm_ops gpmi_pm_ops
= {
2176 SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend
, gpmi_pm_resume
)
2179 static struct platform_driver gpmi_nand_driver
= {
2181 .name
= "gpmi-nand",
2183 .of_match_table
= gpmi_nand_id_table
,
2185 .probe
= gpmi_nand_probe
,
2186 .remove
= gpmi_nand_remove
,
2188 module_platform_driver(gpmi_nand_driver
);
2190 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
2191 MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
2192 MODULE_LICENSE("GPL");