2 * Copyright © 2010-2015 Broadcom Corporation
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
14 #include <linux/version.h>
15 #include <linux/module.h>
16 #include <linux/init.h>
17 #include <linux/delay.h>
18 #include <linux/device.h>
19 #include <linux/platform_device.h>
20 #include <linux/err.h>
21 #include <linux/completion.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/ioport.h>
26 #include <linux/bug.h>
27 #include <linux/kernel.h>
28 #include <linux/bitops.h>
30 #include <linux/mtd/mtd.h>
31 #include <linux/mtd/nand.h>
32 #include <linux/mtd/partitions.h>
34 #include <linux/of_mtd.h>
35 #include <linux/of_platform.h>
36 #include <linux/slab.h>
37 #include <linux/list.h>
38 #include <linux/log2.h>
43 * This flag controls if WP stays on between erase/write commands to mitigate
44 * flash corruption due to power glitches. Values:
45 * 0: NAND_WP is not used or not available
46 * 1: NAND_WP is set by default, cleared for erase/write operations
47 * 2: NAND_WP is always cleared
50 module_param(wp_on
, int, 0444);
52 /***********************************************************************
54 ***********************************************************************/
56 #define DRV_NAME "brcmnand"
59 #define CMD_PAGE_READ 0x01
60 #define CMD_SPARE_AREA_READ 0x02
61 #define CMD_STATUS_READ 0x03
62 #define CMD_PROGRAM_PAGE 0x04
63 #define CMD_PROGRAM_SPARE_AREA 0x05
64 #define CMD_COPY_BACK 0x06
65 #define CMD_DEVICE_ID_READ 0x07
66 #define CMD_BLOCK_ERASE 0x08
67 #define CMD_FLASH_RESET 0x09
68 #define CMD_BLOCKS_LOCK 0x0a
69 #define CMD_BLOCKS_LOCK_DOWN 0x0b
70 #define CMD_BLOCKS_UNLOCK 0x0c
71 #define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
72 #define CMD_PARAMETER_READ 0x0e
73 #define CMD_PARAMETER_CHANGE_COL 0x0f
74 #define CMD_LOW_LEVEL_OP 0x10
76 struct brcm_nand_dma_desc
{
91 /* Bitfields for brcm_nand_dma_desc::status_valid */
92 #define FLASH_DMA_ECC_ERROR (1 << 8)
93 #define FLASH_DMA_CORR_ERROR (1 << 9)
95 /* 512B flash cache in the NAND controller HW */
98 #define FC_WORDS (FC_BYTES >> 2)
100 #define BRCMNAND_MIN_PAGESIZE 512
101 #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
102 #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
104 /* Controller feature flags */
106 BRCMNAND_HAS_1K_SECTORS
= BIT(0),
107 BRCMNAND_HAS_PREFETCH
= BIT(1),
108 BRCMNAND_HAS_CACHE_MODE
= BIT(2),
109 BRCMNAND_HAS_WP
= BIT(3),
112 struct brcmnand_controller
{
114 struct nand_hw_control controller
;
115 void __iomem
*nand_base
;
116 void __iomem
*nand_fc
; /* flash cache */
117 void __iomem
*flash_dma_base
;
119 unsigned int dma_irq
;
122 /* Some SoCs provide custom interrupt status register(s) */
123 struct brcmnand_soc
*soc
;
127 struct completion done
;
128 struct completion dma_done
;
130 /* List of NAND hosts (one for each chip-select) */
131 struct list_head host_list
;
133 struct brcm_nand_dma_desc
*dma_desc
;
136 /* in-memory cache of the FLASH_CACHE, used only for some commands */
137 u32 flash_cache
[FC_WORDS
];
139 /* Controller revision details */
140 const u16
*reg_offsets
;
141 unsigned int reg_spacing
; /* between CS1, CS2, ... regs */
142 const u8
*cs_offsets
; /* within each chip-select */
143 const u8
*cs0_offsets
; /* within CS0, if different */
144 unsigned int max_block_size
;
145 const unsigned int *block_sizes
;
146 unsigned int max_page_size
;
147 const unsigned int *page_sizes
;
148 unsigned int max_oob
;
151 /* for low-power standby/resume only */
152 u32 nand_cs_nand_select
;
153 u32 nand_cs_nand_xor
;
154 u32 corr_stat_threshold
;
158 struct brcmnand_cfg
{
160 unsigned int block_size
;
161 unsigned int page_size
;
162 unsigned int spare_area_size
;
163 unsigned int device_width
;
164 unsigned int col_adr_bytes
;
165 unsigned int blk_adr_bytes
;
166 unsigned int ful_adr_bytes
;
167 unsigned int sector_size_1k
;
168 unsigned int ecc_level
;
169 /* use for low-power standby/resume only */
177 struct brcmnand_host
{
178 struct list_head node
;
179 struct device_node
*of_node
;
181 struct nand_chip chip
;
183 struct platform_device
*pdev
;
186 unsigned int last_cmd
;
187 unsigned int last_byte
;
189 struct brcmnand_cfg hwcfg
;
190 struct brcmnand_controller
*ctrl
;
/*
 * Logical register identifiers, mapped to per-revision offsets via the
 * brcmnand_regs_v* tables below. An offset of 0 means "not present on this
 * revision".
 */
enum brcmnand_reg {
	BRCMNAND_CMD_START = 0,
	BRCMNAND_CMD_EXT_ADDRESS,
	BRCMNAND_CMD_ADDRESS,
	BRCMNAND_INTFC_STATUS,
	BRCMNAND_CS_SELECT,
	BRCMNAND_CS_XOR,
	BRCMNAND_LL_OP,
	BRCMNAND_CS0_BASE,
	BRCMNAND_CS1_BASE,		/* CS1 regs, if non-contiguous */
	BRCMNAND_CORR_THRESHOLD,
	BRCMNAND_CORR_THRESHOLD_EXT,
	BRCMNAND_UNCORR_COUNT,
	BRCMNAND_CORR_COUNT,
	BRCMNAND_CORR_EXT_ADDR,
	BRCMNAND_CORR_ADDR,
	BRCMNAND_UNCORR_EXT_ADDR,
	BRCMNAND_UNCORR_ADDR,
	BRCMNAND_SEMAPHORE,
	BRCMNAND_ID,
	BRCMNAND_ID_EXT,
	BRCMNAND_LL_RDATA,
	BRCMNAND_OOB_READ_BASE,
	BRCMNAND_OOB_READ_10_BASE,	/* offset 0x10, if non-contiguous */
	BRCMNAND_OOB_WRITE_BASE,
	BRCMNAND_OOB_WRITE_10_BASE,	/* offset 0x10, if non-contiguous */
	BRCMNAND_FC_BASE,
};
223 static const u16 brcmnand_regs_v40
[] = {
224 [BRCMNAND_CMD_START
] = 0x04,
225 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
226 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
227 [BRCMNAND_INTFC_STATUS
] = 0x6c,
228 [BRCMNAND_CS_SELECT
] = 0x14,
229 [BRCMNAND_CS_XOR
] = 0x18,
230 [BRCMNAND_LL_OP
] = 0x178,
231 [BRCMNAND_CS0_BASE
] = 0x40,
232 [BRCMNAND_CS1_BASE
] = 0xd0,
233 [BRCMNAND_CORR_THRESHOLD
] = 0x84,
234 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0,
235 [BRCMNAND_UNCORR_COUNT
] = 0,
236 [BRCMNAND_CORR_COUNT
] = 0,
237 [BRCMNAND_CORR_EXT_ADDR
] = 0x70,
238 [BRCMNAND_CORR_ADDR
] = 0x74,
239 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x78,
240 [BRCMNAND_UNCORR_ADDR
] = 0x7c,
241 [BRCMNAND_SEMAPHORE
] = 0x58,
242 [BRCMNAND_ID
] = 0x60,
243 [BRCMNAND_ID_EXT
] = 0x64,
244 [BRCMNAND_LL_RDATA
] = 0x17c,
245 [BRCMNAND_OOB_READ_BASE
] = 0x20,
246 [BRCMNAND_OOB_READ_10_BASE
] = 0x130,
247 [BRCMNAND_OOB_WRITE_BASE
] = 0x30,
248 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
249 [BRCMNAND_FC_BASE
] = 0x200,
253 static const u16 brcmnand_regs_v50
[] = {
254 [BRCMNAND_CMD_START
] = 0x04,
255 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
256 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
257 [BRCMNAND_INTFC_STATUS
] = 0x6c,
258 [BRCMNAND_CS_SELECT
] = 0x14,
259 [BRCMNAND_CS_XOR
] = 0x18,
260 [BRCMNAND_LL_OP
] = 0x178,
261 [BRCMNAND_CS0_BASE
] = 0x40,
262 [BRCMNAND_CS1_BASE
] = 0xd0,
263 [BRCMNAND_CORR_THRESHOLD
] = 0x84,
264 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0,
265 [BRCMNAND_UNCORR_COUNT
] = 0,
266 [BRCMNAND_CORR_COUNT
] = 0,
267 [BRCMNAND_CORR_EXT_ADDR
] = 0x70,
268 [BRCMNAND_CORR_ADDR
] = 0x74,
269 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x78,
270 [BRCMNAND_UNCORR_ADDR
] = 0x7c,
271 [BRCMNAND_SEMAPHORE
] = 0x58,
272 [BRCMNAND_ID
] = 0x60,
273 [BRCMNAND_ID_EXT
] = 0x64,
274 [BRCMNAND_LL_RDATA
] = 0x17c,
275 [BRCMNAND_OOB_READ_BASE
] = 0x20,
276 [BRCMNAND_OOB_READ_10_BASE
] = 0x130,
277 [BRCMNAND_OOB_WRITE_BASE
] = 0x30,
278 [BRCMNAND_OOB_WRITE_10_BASE
] = 0x140,
279 [BRCMNAND_FC_BASE
] = 0x200,
282 /* BRCMNAND v6.0 - v7.1 */
283 static const u16 brcmnand_regs_v60
[] = {
284 [BRCMNAND_CMD_START
] = 0x04,
285 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
286 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
287 [BRCMNAND_INTFC_STATUS
] = 0x14,
288 [BRCMNAND_CS_SELECT
] = 0x18,
289 [BRCMNAND_CS_XOR
] = 0x1c,
290 [BRCMNAND_LL_OP
] = 0x20,
291 [BRCMNAND_CS0_BASE
] = 0x50,
292 [BRCMNAND_CS1_BASE
] = 0,
293 [BRCMNAND_CORR_THRESHOLD
] = 0xc0,
294 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0xc4,
295 [BRCMNAND_UNCORR_COUNT
] = 0xfc,
296 [BRCMNAND_CORR_COUNT
] = 0x100,
297 [BRCMNAND_CORR_EXT_ADDR
] = 0x10c,
298 [BRCMNAND_CORR_ADDR
] = 0x110,
299 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x114,
300 [BRCMNAND_UNCORR_ADDR
] = 0x118,
301 [BRCMNAND_SEMAPHORE
] = 0x150,
302 [BRCMNAND_ID
] = 0x194,
303 [BRCMNAND_ID_EXT
] = 0x198,
304 [BRCMNAND_LL_RDATA
] = 0x19c,
305 [BRCMNAND_OOB_READ_BASE
] = 0x200,
306 [BRCMNAND_OOB_READ_10_BASE
] = 0,
307 [BRCMNAND_OOB_WRITE_BASE
] = 0x280,
308 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
309 [BRCMNAND_FC_BASE
] = 0x400,
312 enum brcmnand_cs_reg
{
313 BRCMNAND_CS_CFG_EXT
= 0,
315 BRCMNAND_CS_ACC_CONTROL
,
320 /* Per chip-select offsets for v7.1 */
321 static const u8 brcmnand_cs_offsets_v71
[] = {
322 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
323 [BRCMNAND_CS_CFG_EXT
] = 0x04,
324 [BRCMNAND_CS_CFG
] = 0x08,
325 [BRCMNAND_CS_TIMING1
] = 0x0c,
326 [BRCMNAND_CS_TIMING2
] = 0x10,
329 /* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
330 static const u8 brcmnand_cs_offsets
[] = {
331 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
332 [BRCMNAND_CS_CFG_EXT
] = 0x04,
333 [BRCMNAND_CS_CFG
] = 0x04,
334 [BRCMNAND_CS_TIMING1
] = 0x08,
335 [BRCMNAND_CS_TIMING2
] = 0x0c,
338 /* Per chip-select offset for <= v5.0 on CS0 only */
339 static const u8 brcmnand_cs_offsets_cs0
[] = {
340 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
341 [BRCMNAND_CS_CFG_EXT
] = 0x08,
342 [BRCMNAND_CS_CFG
] = 0x08,
343 [BRCMNAND_CS_TIMING1
] = 0x10,
344 [BRCMNAND_CS_TIMING2
] = 0x14,
347 /* BRCMNAND_INTFC_STATUS */
349 INTFC_FLASH_STATUS
= GENMASK(7, 0),
351 INTFC_ERASED
= BIT(27),
352 INTFC_OOB_VALID
= BIT(28),
353 INTFC_CACHE_VALID
= BIT(29),
354 INTFC_FLASH_READY
= BIT(30),
355 INTFC_CTLR_READY
= BIT(31),
358 static inline u32
nand_readreg(struct brcmnand_controller
*ctrl
, u32 offs
)
360 return brcmnand_readl(ctrl
->nand_base
+ offs
);
363 static inline void nand_writereg(struct brcmnand_controller
*ctrl
, u32 offs
,
366 brcmnand_writel(val
, ctrl
->nand_base
+ offs
);
369 static int brcmnand_revision_init(struct brcmnand_controller
*ctrl
)
371 static const unsigned int block_sizes_v6
[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
372 static const unsigned int block_sizes_v4
[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
373 static const unsigned int page_sizes
[] = { 512, 2048, 4096, 8192, 0 };
375 ctrl
->nand_version
= nand_readreg(ctrl
, 0) & 0xffff;
377 /* Only support v4.0+? */
378 if (ctrl
->nand_version
< 0x0400) {
379 dev_err(ctrl
->dev
, "version %#x not supported\n",
384 /* Register offsets */
385 if (ctrl
->nand_version
>= 0x0600)
386 ctrl
->reg_offsets
= brcmnand_regs_v60
;
387 else if (ctrl
->nand_version
>= 0x0500)
388 ctrl
->reg_offsets
= brcmnand_regs_v50
;
389 else if (ctrl
->nand_version
>= 0x0400)
390 ctrl
->reg_offsets
= brcmnand_regs_v40
;
392 /* Chip-select stride */
393 if (ctrl
->nand_version
>= 0x0701)
394 ctrl
->reg_spacing
= 0x14;
396 ctrl
->reg_spacing
= 0x10;
398 /* Per chip-select registers */
399 if (ctrl
->nand_version
>= 0x0701) {
400 ctrl
->cs_offsets
= brcmnand_cs_offsets_v71
;
402 ctrl
->cs_offsets
= brcmnand_cs_offsets
;
404 /* v5.0 and earlier has a different CS0 offset layout */
405 if (ctrl
->nand_version
<= 0x0500)
406 ctrl
->cs0_offsets
= brcmnand_cs_offsets_cs0
;
409 /* Page / block sizes */
410 if (ctrl
->nand_version
>= 0x0701) {
411 /* >= v7.1 use nice power-of-2 values! */
412 ctrl
->max_page_size
= 16 * 1024;
413 ctrl
->max_block_size
= 2 * 1024 * 1024;
415 ctrl
->page_sizes
= page_sizes
;
416 if (ctrl
->nand_version
>= 0x0600)
417 ctrl
->block_sizes
= block_sizes_v6
;
419 ctrl
->block_sizes
= block_sizes_v4
;
421 if (ctrl
->nand_version
< 0x0400) {
422 ctrl
->max_page_size
= 4096;
423 ctrl
->max_block_size
= 512 * 1024;
427 /* Maximum spare area sector size (per 512B) */
428 if (ctrl
->nand_version
>= 0x0600)
430 else if (ctrl
->nand_version
>= 0x0500)
435 /* v6.0 and newer (except v6.1) have prefetch support */
436 if (ctrl
->nand_version
>= 0x0600 && ctrl
->nand_version
!= 0x0601)
437 ctrl
->features
|= BRCMNAND_HAS_PREFETCH
;
440 * v6.x has cache mode, but it's implemented differently. Ignore it for
443 if (ctrl
->nand_version
>= 0x0700)
444 ctrl
->features
|= BRCMNAND_HAS_CACHE_MODE
;
446 if (ctrl
->nand_version
>= 0x0500)
447 ctrl
->features
|= BRCMNAND_HAS_1K_SECTORS
;
449 if (ctrl
->nand_version
>= 0x0700)
450 ctrl
->features
|= BRCMNAND_HAS_WP
;
451 else if (of_property_read_bool(ctrl
->dev
->of_node
, "brcm,nand-has-wp"))
452 ctrl
->features
|= BRCMNAND_HAS_WP
;
457 static inline u32
brcmnand_read_reg(struct brcmnand_controller
*ctrl
,
458 enum brcmnand_reg reg
)
460 u16 offs
= ctrl
->reg_offsets
[reg
];
463 return nand_readreg(ctrl
, offs
);
468 static inline void brcmnand_write_reg(struct brcmnand_controller
*ctrl
,
469 enum brcmnand_reg reg
, u32 val
)
471 u16 offs
= ctrl
->reg_offsets
[reg
];
474 nand_writereg(ctrl
, offs
, val
);
477 static inline void brcmnand_rmw_reg(struct brcmnand_controller
*ctrl
,
478 enum brcmnand_reg reg
, u32 mask
, unsigned
481 u32 tmp
= brcmnand_read_reg(ctrl
, reg
);
485 brcmnand_write_reg(ctrl
, reg
, tmp
);
488 static inline u32
brcmnand_read_fc(struct brcmnand_controller
*ctrl
, int word
)
490 return __raw_readl(ctrl
->nand_fc
+ word
* 4);
493 static inline void brcmnand_write_fc(struct brcmnand_controller
*ctrl
,
496 __raw_writel(val
, ctrl
->nand_fc
+ word
* 4);
499 static inline u16
brcmnand_cs_offset(struct brcmnand_controller
*ctrl
, int cs
,
500 enum brcmnand_cs_reg reg
)
502 u16 offs_cs0
= ctrl
->reg_offsets
[BRCMNAND_CS0_BASE
];
503 u16 offs_cs1
= ctrl
->reg_offsets
[BRCMNAND_CS1_BASE
];
506 if (cs
== 0 && ctrl
->cs0_offsets
)
507 cs_offs
= ctrl
->cs0_offsets
[reg
];
509 cs_offs
= ctrl
->cs_offsets
[reg
];
512 return offs_cs1
+ (cs
- 1) * ctrl
->reg_spacing
+ cs_offs
;
514 return offs_cs0
+ cs
* ctrl
->reg_spacing
+ cs_offs
;
517 static inline u32
brcmnand_count_corrected(struct brcmnand_controller
*ctrl
)
519 if (ctrl
->nand_version
< 0x0600)
521 return brcmnand_read_reg(ctrl
, BRCMNAND_CORR_COUNT
);
524 static void brcmnand_wr_corr_thresh(struct brcmnand_host
*host
, u8 val
)
526 struct brcmnand_controller
*ctrl
= host
->ctrl
;
527 unsigned int shift
= 0, bits
;
528 enum brcmnand_reg reg
= BRCMNAND_CORR_THRESHOLD
;
531 if (ctrl
->nand_version
>= 0x0600)
533 else if (ctrl
->nand_version
>= 0x0500)
538 if (ctrl
->nand_version
>= 0x0600) {
540 reg
= BRCMNAND_CORR_THRESHOLD_EXT
;
541 shift
= (cs
% 5) * bits
;
543 brcmnand_rmw_reg(ctrl
, reg
, (bits
- 1) << shift
, shift
, val
);
546 static inline int brcmnand_cmd_shift(struct brcmnand_controller
*ctrl
)
548 if (ctrl
->nand_version
< 0x0700)
553 /***********************************************************************
554 * NAND ACC CONTROL bitfield
556 * Some bits have remained constant throughout hardware revision, while
557 * others have shifted around.
558 ***********************************************************************/
560 /* Constant for all versions (where supported) */
562 /* See BRCMNAND_HAS_CACHE_MODE */
563 ACC_CONTROL_CACHE_MODE
= BIT(22),
565 /* See BRCMNAND_HAS_PREFETCH */
566 ACC_CONTROL_PREFETCH
= BIT(23),
568 ACC_CONTROL_PAGE_HIT
= BIT(24),
569 ACC_CONTROL_WR_PREEMPT
= BIT(25),
570 ACC_CONTROL_PARTIAL_PAGE
= BIT(26),
571 ACC_CONTROL_RD_ERASED
= BIT(27),
572 ACC_CONTROL_FAST_PGM_RDIN
= BIT(28),
573 ACC_CONTROL_WR_ECC
= BIT(30),
574 ACC_CONTROL_RD_ECC
= BIT(31),
577 static inline u32
brcmnand_spare_area_mask(struct brcmnand_controller
*ctrl
)
579 if (ctrl
->nand_version
>= 0x0600)
580 return GENMASK(6, 0);
582 return GENMASK(5, 0);
585 #define NAND_ACC_CONTROL_ECC_SHIFT 16
587 static inline u32
brcmnand_ecc_level_mask(struct brcmnand_controller
*ctrl
)
589 u32 mask
= (ctrl
->nand_version
>= 0x0600) ? 0x1f : 0x0f;
591 return mask
<< NAND_ACC_CONTROL_ECC_SHIFT
;
594 static void brcmnand_set_ecc_enabled(struct brcmnand_host
*host
, int en
)
596 struct brcmnand_controller
*ctrl
= host
->ctrl
;
597 u16 offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_ACC_CONTROL
);
598 u32 acc_control
= nand_readreg(ctrl
, offs
);
599 u32 ecc_flags
= ACC_CONTROL_WR_ECC
| ACC_CONTROL_RD_ECC
;
602 acc_control
|= ecc_flags
; /* enable RD/WR ECC */
603 acc_control
|= host
->hwcfg
.ecc_level
604 << NAND_ACC_CONTROL_ECC_SHIFT
;
606 acc_control
&= ~ecc_flags
; /* disable RD/WR ECC */
607 acc_control
&= ~brcmnand_ecc_level_mask(ctrl
);
610 nand_writereg(ctrl
, offs
, acc_control
);
613 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller
*ctrl
)
615 if (ctrl
->nand_version
>= 0x0600)
617 else if (ctrl
->nand_version
>= 0x0500)
623 static int brcmnand_get_sector_size_1k(struct brcmnand_host
*host
)
625 struct brcmnand_controller
*ctrl
= host
->ctrl
;
626 int shift
= brcmnand_sector_1k_shift(ctrl
);
627 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
628 BRCMNAND_CS_ACC_CONTROL
);
633 return (nand_readreg(ctrl
, acc_control_offs
) >> shift
) & 0x1;
636 static void brcmnand_set_sector_size_1k(struct brcmnand_host
*host
, int val
)
638 struct brcmnand_controller
*ctrl
= host
->ctrl
;
639 int shift
= brcmnand_sector_1k_shift(ctrl
);
640 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
641 BRCMNAND_CS_ACC_CONTROL
);
647 tmp
= nand_readreg(ctrl
, acc_control_offs
);
648 tmp
&= ~(1 << shift
);
649 tmp
|= (!!val
) << shift
;
650 nand_writereg(ctrl
, acc_control_offs
, tmp
);
653 /***********************************************************************
655 ***********************************************************************/
658 CS_SELECT_NAND_WP
= BIT(29),
659 CS_SELECT_AUTO_DEVICE_ID_CFG
= BIT(30),
662 static inline void brcmnand_set_wp(struct brcmnand_controller
*ctrl
, bool en
)
664 u32 val
= en
? CS_SELECT_NAND_WP
: 0;
666 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_SELECT
, CS_SELECT_NAND_WP
, 0, val
);
669 /***********************************************************************
671 ***********************************************************************/
674 FLASH_DMA_REVISION
= 0x00,
675 FLASH_DMA_FIRST_DESC
= 0x04,
676 FLASH_DMA_FIRST_DESC_EXT
= 0x08,
677 FLASH_DMA_CTRL
= 0x0c,
678 FLASH_DMA_MODE
= 0x10,
679 FLASH_DMA_STATUS
= 0x14,
680 FLASH_DMA_INTERRUPT_DESC
= 0x18,
681 FLASH_DMA_INTERRUPT_DESC_EXT
= 0x1c,
682 FLASH_DMA_ERROR_STATUS
= 0x20,
683 FLASH_DMA_CURRENT_DESC
= 0x24,
684 FLASH_DMA_CURRENT_DESC_EXT
= 0x28,
687 static inline bool has_flash_dma(struct brcmnand_controller
*ctrl
)
689 return ctrl
->flash_dma_base
;
692 static inline bool flash_dma_buf_ok(const void *buf
)
694 return buf
&& !is_vmalloc_addr(buf
) &&
695 likely(IS_ALIGNED((uintptr_t)buf
, 4));
698 static inline void flash_dma_writel(struct brcmnand_controller
*ctrl
, u8 offs
,
701 brcmnand_writel(val
, ctrl
->flash_dma_base
+ offs
);
704 static inline u32
flash_dma_readl(struct brcmnand_controller
*ctrl
, u8 offs
)
706 return brcmnand_readl(ctrl
->flash_dma_base
+ offs
);
709 /* Low-level operation types: command, address, write, or read */
710 enum brcmnand_llop_type
{
717 /***********************************************************************
718 * Internal support functions
719 ***********************************************************************/
721 static inline bool is_hamming_ecc(struct brcmnand_cfg
*cfg
)
723 return cfg
->sector_size_1k
== 0 && cfg
->spare_area_size
== 16 &&
724 cfg
->ecc_level
== 15;
728 * Returns a nand_ecclayout strucutre for the given layout/configuration.
729 * Returns NULL on failure.
731 static struct nand_ecclayout
*brcmnand_create_layout(int ecc_level
,
732 struct brcmnand_host
*host
)
734 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
736 struct nand_ecclayout
*layout
;
742 layout
= devm_kzalloc(&host
->pdev
->dev
, sizeof(*layout
), GFP_KERNEL
);
746 sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
747 sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
750 if (is_hamming_ecc(cfg
)) {
751 for (i
= 0, idx1
= 0, idx2
= 0; i
< sectors
; i
++) {
752 /* First sector of each page may have BBI */
754 layout
->oobfree
[idx2
].offset
= i
* sas
+ 1;
755 /* Small-page NAND use byte 6 for BBI */
756 if (cfg
->page_size
== 512)
757 layout
->oobfree
[idx2
].offset
--;
758 layout
->oobfree
[idx2
].length
= 5;
760 layout
->oobfree
[idx2
].offset
= i
* sas
;
761 layout
->oobfree
[idx2
].length
= 6;
764 layout
->eccpos
[idx1
++] = i
* sas
+ 6;
765 layout
->eccpos
[idx1
++] = i
* sas
+ 7;
766 layout
->eccpos
[idx1
++] = i
* sas
+ 8;
767 layout
->oobfree
[idx2
].offset
= i
* sas
+ 9;
768 layout
->oobfree
[idx2
].length
= 7;
770 /* Leave zero-terminated entry for OOBFREE */
771 if (idx1
>= MTD_MAX_ECCPOS_ENTRIES_LARGE
||
772 idx2
>= MTD_MAX_OOBFREE_ENTRIES_LARGE
- 1)
779 * CONTROLLER_VERSION:
780 * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
781 * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
782 * But we will just be conservative.
784 req
= DIV_ROUND_UP(ecc_level
* 14, 8);
786 dev_err(&host
->pdev
->dev
,
787 "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
792 layout
->eccbytes
= req
* sectors
;
793 for (i
= 0, idx1
= 0, idx2
= 0; i
< sectors
; i
++) {
794 for (j
= sas
- req
; j
< sas
&& idx1
<
795 MTD_MAX_ECCPOS_ENTRIES_LARGE
; j
++, idx1
++)
796 layout
->eccpos
[idx1
] = i
* sas
+ j
;
798 /* First sector of each page may have BBI */
800 if (cfg
->page_size
== 512 && (sas
- req
>= 6)) {
801 /* Small-page NAND use byte 6 for BBI */
802 layout
->oobfree
[idx2
].offset
= 0;
803 layout
->oobfree
[idx2
].length
= 5;
806 layout
->oobfree
[idx2
].offset
= 6;
807 layout
->oobfree
[idx2
].length
=
811 } else if (sas
> req
+ 1) {
812 layout
->oobfree
[idx2
].offset
= i
* sas
+ 1;
813 layout
->oobfree
[idx2
].length
= sas
- req
- 1;
816 } else if (sas
> req
) {
817 layout
->oobfree
[idx2
].offset
= i
* sas
;
818 layout
->oobfree
[idx2
].length
= sas
- req
;
821 /* Leave zero-terminated entry for OOBFREE */
822 if (idx1
>= MTD_MAX_ECCPOS_ENTRIES_LARGE
||
823 idx2
>= MTD_MAX_OOBFREE_ENTRIES_LARGE
- 1)
827 /* Sum available OOB */
828 for (i
= 0; i
< MTD_MAX_OOBFREE_ENTRIES_LARGE
; i
++)
829 layout
->oobavail
+= layout
->oobfree
[i
].length
;
833 static struct nand_ecclayout
*brcmstb_choose_ecc_layout(
834 struct brcmnand_host
*host
)
836 struct nand_ecclayout
*layout
;
837 struct brcmnand_cfg
*p
= &host
->hwcfg
;
838 unsigned int ecc_level
= p
->ecc_level
;
840 if (p
->sector_size_1k
)
843 layout
= brcmnand_create_layout(ecc_level
, host
);
845 dev_err(&host
->pdev
->dev
,
846 "no proper ecc_layout for this NAND cfg\n");
853 static void brcmnand_wp(struct mtd_info
*mtd
, int wp
)
855 struct nand_chip
*chip
= mtd
->priv
;
856 struct brcmnand_host
*host
= chip
->priv
;
857 struct brcmnand_controller
*ctrl
= host
->ctrl
;
859 if ((ctrl
->features
& BRCMNAND_HAS_WP
) && wp_on
== 1) {
860 static int old_wp
= -1;
863 dev_dbg(ctrl
->dev
, "WP %s\n", wp
? "on" : "off");
866 brcmnand_set_wp(ctrl
, wp
);
870 /* Helper functions for reading and writing OOB registers */
871 static inline u8
oob_reg_read(struct brcmnand_controller
*ctrl
, u32 offs
)
873 u16 offset0
, offset10
, reg_offs
;
875 offset0
= ctrl
->reg_offsets
[BRCMNAND_OOB_READ_BASE
];
876 offset10
= ctrl
->reg_offsets
[BRCMNAND_OOB_READ_10_BASE
];
878 if (offs
>= ctrl
->max_oob
)
881 if (offs
>= 16 && offset10
)
882 reg_offs
= offset10
+ ((offs
- 0x10) & ~0x03);
884 reg_offs
= offset0
+ (offs
& ~0x03);
886 return nand_readreg(ctrl
, reg_offs
) >> (24 - ((offs
& 0x03) << 3));
889 static inline void oob_reg_write(struct brcmnand_controller
*ctrl
, u32 offs
,
892 u16 offset0
, offset10
, reg_offs
;
894 offset0
= ctrl
->reg_offsets
[BRCMNAND_OOB_WRITE_BASE
];
895 offset10
= ctrl
->reg_offsets
[BRCMNAND_OOB_WRITE_10_BASE
];
897 if (offs
>= ctrl
->max_oob
)
900 if (offs
>= 16 && offset10
)
901 reg_offs
= offset10
+ ((offs
- 0x10) & ~0x03);
903 reg_offs
= offset0
+ (offs
& ~0x03);
905 nand_writereg(ctrl
, reg_offs
, data
);
909 * read_oob_from_regs - read data from OOB registers
910 * @ctrl: NAND controller
911 * @i: sub-page sector index
912 * @oob: buffer to read to
913 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
914 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
916 static int read_oob_from_regs(struct brcmnand_controller
*ctrl
, int i
, u8
*oob
,
917 int sas
, int sector_1k
)
919 int tbytes
= sas
<< sector_1k
;
922 /* Adjust OOB values for 1K sector size */
923 if (sector_1k
&& (i
& 0x01))
924 tbytes
= max(0, tbytes
- (int)ctrl
->max_oob
);
925 tbytes
= min_t(int, tbytes
, ctrl
->max_oob
);
927 for (j
= 0; j
< tbytes
; j
++)
928 oob
[j
] = oob_reg_read(ctrl
, j
);
933 * write_oob_to_regs - write data to OOB registers
934 * @i: sub-page sector index
935 * @oob: buffer to write from
936 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
937 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
939 static int write_oob_to_regs(struct brcmnand_controller
*ctrl
, int i
,
940 const u8
*oob
, int sas
, int sector_1k
)
942 int tbytes
= sas
<< sector_1k
;
945 /* Adjust OOB values for 1K sector size */
946 if (sector_1k
&& (i
& 0x01))
947 tbytes
= max(0, tbytes
- (int)ctrl
->max_oob
);
948 tbytes
= min_t(int, tbytes
, ctrl
->max_oob
);
950 for (j
= 0; j
< tbytes
; j
+= 4)
951 oob_reg_write(ctrl
, j
,
959 static irqreturn_t
brcmnand_ctlrdy_irq(int irq
, void *data
)
961 struct brcmnand_controller
*ctrl
= data
;
963 /* Discard all NAND_CTLRDY interrupts during DMA */
964 if (ctrl
->dma_pending
)
967 complete(&ctrl
->done
);
971 /* Handle SoC-specific interrupt hardware */
972 static irqreturn_t
brcmnand_irq(int irq
, void *data
)
974 struct brcmnand_controller
*ctrl
= data
;
976 if (ctrl
->soc
->ctlrdy_ack(ctrl
->soc
))
977 return brcmnand_ctlrdy_irq(irq
, data
);
982 static irqreturn_t
brcmnand_dma_irq(int irq
, void *data
)
984 struct brcmnand_controller
*ctrl
= data
;
986 complete(&ctrl
->dma_done
);
991 static void brcmnand_send_cmd(struct brcmnand_host
*host
, int cmd
)
993 struct brcmnand_controller
*ctrl
= host
->ctrl
;
996 dev_dbg(ctrl
->dev
, "send native cmd %d addr_lo 0x%x\n", cmd
,
997 brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
));
998 BUG_ON(ctrl
->cmd_pending
!= 0);
999 ctrl
->cmd_pending
= cmd
;
1001 intfc
= brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
);
1002 BUG_ON(!(intfc
& INTFC_CTLR_READY
));
1004 mb(); /* flush previous writes */
1005 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_START
,
1006 cmd
<< brcmnand_cmd_shift(ctrl
));
/***********************************************************************
 * NAND MTD API: read/program/erase
 ***********************************************************************/

static void brcmnand_cmd_ctrl(struct mtd_info *mtd, int dat,
	unsigned int ctrl)
{
	/* intentionally left blank */
}
1019 static int brcmnand_waitfunc(struct mtd_info
*mtd
, struct nand_chip
*this)
1021 struct nand_chip
*chip
= mtd
->priv
;
1022 struct brcmnand_host
*host
= chip
->priv
;
1023 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1024 unsigned long timeo
= msecs_to_jiffies(100);
1026 dev_dbg(ctrl
->dev
, "wait on native cmd %d\n", ctrl
->cmd_pending
);
1027 if (ctrl
->cmd_pending
&&
1028 wait_for_completion_timeout(&ctrl
->done
, timeo
) <= 0) {
1029 u32 cmd
= brcmnand_read_reg(ctrl
, BRCMNAND_CMD_START
)
1030 >> brcmnand_cmd_shift(ctrl
);
1032 dev_err_ratelimited(ctrl
->dev
,
1033 "timeout waiting for command %#02x\n", cmd
);
1034 dev_err_ratelimited(ctrl
->dev
, "intfc status %08x\n",
1035 brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
));
1037 ctrl
->cmd_pending
= 0;
1038 return brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
) &
1047 LLOP_RETURN_IDLE
= BIT(31),
1049 LLOP_DATA_MASK
= GENMASK(15, 0),
1052 static int brcmnand_low_level_op(struct brcmnand_host
*host
,
1053 enum brcmnand_llop_type type
, u32 data
,
1056 struct mtd_info
*mtd
= &host
->mtd
;
1057 struct nand_chip
*chip
= &host
->chip
;
1058 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1061 tmp
= data
& LLOP_DATA_MASK
;
1064 tmp
|= LLOP_WE
| LLOP_CLE
;
1068 tmp
|= LLOP_WE
| LLOP_ALE
;
1081 tmp
|= LLOP_RETURN_IDLE
;
1083 dev_dbg(ctrl
->dev
, "ll_op cmd %#x\n", tmp
);
1085 brcmnand_write_reg(ctrl
, BRCMNAND_LL_OP
, tmp
);
1086 (void)brcmnand_read_reg(ctrl
, BRCMNAND_LL_OP
);
1088 brcmnand_send_cmd(host
, CMD_LOW_LEVEL_OP
);
1089 return brcmnand_waitfunc(mtd
, chip
);
1092 static void brcmnand_cmdfunc(struct mtd_info
*mtd
, unsigned command
,
1093 int column
, int page_addr
)
1095 struct nand_chip
*chip
= mtd
->priv
;
1096 struct brcmnand_host
*host
= chip
->priv
;
1097 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1098 u64 addr
= (u64
)page_addr
<< chip
->page_shift
;
1101 if (command
== NAND_CMD_READID
|| command
== NAND_CMD_PARAM
||
1102 command
== NAND_CMD_RNDOUT
)
1104 /* Avoid propagating a negative, don't-care address */
1105 else if (page_addr
< 0)
1108 dev_dbg(ctrl
->dev
, "cmd 0x%x addr 0x%llx\n", command
,
1109 (unsigned long long)addr
);
1111 host
->last_cmd
= command
;
1112 host
->last_byte
= 0;
1113 host
->last_addr
= addr
;
1116 case NAND_CMD_RESET
:
1117 native_cmd
= CMD_FLASH_RESET
;
1119 case NAND_CMD_STATUS
:
1120 native_cmd
= CMD_STATUS_READ
;
1122 case NAND_CMD_READID
:
1123 native_cmd
= CMD_DEVICE_ID_READ
;
1125 case NAND_CMD_READOOB
:
1126 native_cmd
= CMD_SPARE_AREA_READ
;
1128 case NAND_CMD_ERASE1
:
1129 native_cmd
= CMD_BLOCK_ERASE
;
1130 brcmnand_wp(mtd
, 0);
1132 case NAND_CMD_PARAM
:
1133 native_cmd
= CMD_PARAMETER_READ
;
1135 case NAND_CMD_SET_FEATURES
:
1136 case NAND_CMD_GET_FEATURES
:
1137 brcmnand_low_level_op(host
, LL_OP_CMD
, command
, false);
1138 brcmnand_low_level_op(host
, LL_OP_ADDR
, column
, false);
1140 case NAND_CMD_RNDOUT
:
1141 native_cmd
= CMD_PARAMETER_CHANGE_COL
;
1142 addr
&= ~((u64
)(FC_BYTES
- 1));
1144 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
1145 * NB: hwcfg.sector_size_1k may not be initialized yet
1147 if (brcmnand_get_sector_size_1k(host
)) {
1148 host
->hwcfg
.sector_size_1k
=
1149 brcmnand_get_sector_size_1k(host
);
1150 brcmnand_set_sector_size_1k(host
, 0);
1158 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1159 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1160 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1161 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
, lower_32_bits(addr
));
1162 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1164 brcmnand_send_cmd(host
, native_cmd
);
1165 brcmnand_waitfunc(mtd
, chip
);
1167 if (native_cmd
== CMD_PARAMETER_READ
||
1168 native_cmd
== CMD_PARAMETER_CHANGE_COL
) {
1171 brcmnand_soc_data_bus_prepare(ctrl
->soc
);
1174 * Must cache the FLASH_CACHE now, since changes in
1175 * SECTOR_SIZE_1K may invalidate it
1177 for (i
= 0; i
< FC_WORDS
; i
++)
1178 ctrl
->flash_cache
[i
] = brcmnand_read_fc(ctrl
, i
);
1180 brcmnand_soc_data_bus_unprepare(ctrl
->soc
);
1182 /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
1183 if (host
->hwcfg
.sector_size_1k
)
1184 brcmnand_set_sector_size_1k(host
,
1185 host
->hwcfg
.sector_size_1k
);
1188 /* Re-enable protection is necessary only after erase */
1189 if (command
== NAND_CMD_ERASE1
)
1190 brcmnand_wp(mtd
, 1);
1193 static uint8_t brcmnand_read_byte(struct mtd_info
*mtd
)
1195 struct nand_chip
*chip
= mtd
->priv
;
1196 struct brcmnand_host
*host
= chip
->priv
;
1197 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1201 switch (host
->last_cmd
) {
1202 case NAND_CMD_READID
:
1203 if (host
->last_byte
< 4)
1204 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_ID
) >>
1205 (24 - (host
->last_byte
<< 3));
1206 else if (host
->last_byte
< 8)
1207 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_ID_EXT
) >>
1208 (56 - (host
->last_byte
<< 3));
1211 case NAND_CMD_READOOB
:
1212 ret
= oob_reg_read(ctrl
, host
->last_byte
);
1215 case NAND_CMD_STATUS
:
1216 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
) &
1218 if (wp_on
) /* hide WP status */
1219 ret
|= NAND_STATUS_WP
;
1222 case NAND_CMD_PARAM
:
1223 case NAND_CMD_RNDOUT
:
1224 addr
= host
->last_addr
+ host
->last_byte
;
1225 offs
= addr
& (FC_BYTES
- 1);
1227 /* At FC_BYTES boundary, switch to next column */
1228 if (host
->last_byte
> 0 && offs
== 0)
1229 chip
->cmdfunc(mtd
, NAND_CMD_RNDOUT
, addr
, -1);
1231 ret
= ctrl
->flash_cache
[offs
>> 2] >>
1232 (24 - ((offs
& 0x03) << 3));
1234 case NAND_CMD_GET_FEATURES
:
1235 if (host
->last_byte
>= ONFI_SUBFEATURE_PARAM_LEN
) {
1238 bool last
= host
->last_byte
==
1239 ONFI_SUBFEATURE_PARAM_LEN
- 1;
1240 brcmnand_low_level_op(host
, LL_OP_RD
, 0, last
);
1241 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_LL_RDATA
) & 0xff;
1245 dev_dbg(ctrl
->dev
, "read byte = 0x%02x\n", ret
);
1251 static void brcmnand_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
1255 for (i
= 0; i
< len
; i
++, buf
++)
1256 *buf
= brcmnand_read_byte(mtd
);
1259 static void brcmnand_write_buf(struct mtd_info
*mtd
, const uint8_t *buf
,
1263 struct nand_chip
*chip
= mtd
->priv
;
1264 struct brcmnand_host
*host
= chip
->priv
;
1266 switch (host
->last_cmd
) {
1267 case NAND_CMD_SET_FEATURES
:
1268 for (i
= 0; i
< len
; i
++)
1269 brcmnand_low_level_op(host
, LL_OP_WR
, buf
[i
],
1279 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
1280 * following ahead of time:
1281 * - Is this descriptor the beginning or end of a linked list?
1282 * - What is the (DMA) address of the next descriptor in the linked list?
1284 static int brcmnand_fill_dma_desc(struct brcmnand_host
*host
,
1285 struct brcm_nand_dma_desc
*desc
, u64 addr
,
1286 dma_addr_t buf
, u32 len
, u8 dma_cmd
,
1287 bool begin
, bool end
,
1288 dma_addr_t next_desc
)
1290 memset(desc
, 0, sizeof(*desc
));
1291 /* Descriptors are written in native byte order (wordwise) */
1292 desc
->next_desc
= lower_32_bits(next_desc
);
1293 desc
->next_desc_ext
= upper_32_bits(next_desc
);
1294 desc
->cmd_irq
= (dma_cmd
<< 24) |
1295 (end
? (0x03 << 8) : 0) | /* IRQ | STOP */
1296 (!!begin
) | ((!!end
) << 1); /* head, tail */
1297 #ifdef CONFIG_CPU_BIG_ENDIAN
1298 desc
->cmd_irq
|= 0x01 << 12;
1300 desc
->dram_addr
= lower_32_bits(buf
);
1301 desc
->dram_addr_ext
= upper_32_bits(buf
);
1302 desc
->tfr_len
= len
;
1303 desc
->total_len
= len
;
1304 desc
->flash_addr
= lower_32_bits(addr
);
1305 desc
->flash_addr_ext
= upper_32_bits(addr
);
1306 desc
->cs
= host
->cs
;
1307 desc
->status_valid
= 0x01;
1312 * Kick the FLASH_DMA engine, with a given DMA descriptor
1314 static void brcmnand_dma_run(struct brcmnand_host
*host
, dma_addr_t desc
)
1316 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1317 unsigned long timeo
= msecs_to_jiffies(100);
1319 flash_dma_writel(ctrl
, FLASH_DMA_FIRST_DESC
, lower_32_bits(desc
));
1320 (void)flash_dma_readl(ctrl
, FLASH_DMA_FIRST_DESC
);
1321 flash_dma_writel(ctrl
, FLASH_DMA_FIRST_DESC_EXT
, upper_32_bits(desc
));
1322 (void)flash_dma_readl(ctrl
, FLASH_DMA_FIRST_DESC_EXT
);
1324 /* Start FLASH_DMA engine */
1325 ctrl
->dma_pending
= true;
1326 mb(); /* flush previous writes */
1327 flash_dma_writel(ctrl
, FLASH_DMA_CTRL
, 0x03); /* wake | run */
1329 if (wait_for_completion_timeout(&ctrl
->dma_done
, timeo
) <= 0) {
1331 "timeout waiting for DMA; status %#x, error status %#x\n",
1332 flash_dma_readl(ctrl
, FLASH_DMA_STATUS
),
1333 flash_dma_readl(ctrl
, FLASH_DMA_ERROR_STATUS
));
1335 ctrl
->dma_pending
= false;
1336 flash_dma_writel(ctrl
, FLASH_DMA_CTRL
, 0); /* force stop */
1339 static int brcmnand_dma_trans(struct brcmnand_host
*host
, u64 addr
, u32
*buf
,
1340 u32 len
, u8 dma_cmd
)
1342 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1344 int dir
= dma_cmd
== CMD_PAGE_READ
? DMA_FROM_DEVICE
: DMA_TO_DEVICE
;
1346 buf_pa
= dma_map_single(ctrl
->dev
, buf
, len
, dir
);
1347 if (dma_mapping_error(ctrl
->dev
, buf_pa
)) {
1348 dev_err(ctrl
->dev
, "unable to map buffer for DMA\n");
1352 brcmnand_fill_dma_desc(host
, ctrl
->dma_desc
, addr
, buf_pa
, len
,
1353 dma_cmd
, true, true, 0);
1355 brcmnand_dma_run(host
, ctrl
->dma_pa
);
1357 dma_unmap_single(ctrl
->dev
, buf_pa
, len
, dir
);
1359 if (ctrl
->dma_desc
->status_valid
& FLASH_DMA_ECC_ERROR
)
1361 else if (ctrl
->dma_desc
->status_valid
& FLASH_DMA_CORR_ERROR
)
1368 * Assumes proper CS is already set
1370 static int brcmnand_read_by_pio(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1371 u64 addr
, unsigned int trans
, u32
*buf
,
1372 u8
*oob
, u64
*err_addr
)
1374 struct brcmnand_host
*host
= chip
->priv
;
1375 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1378 /* Clear error addresses */
1379 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_ADDR
, 0);
1380 brcmnand_write_reg(ctrl
, BRCMNAND_CORR_ADDR
, 0);
1382 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1383 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1384 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1386 for (i
= 0; i
< trans
; i
++, addr
+= FC_BYTES
) {
1387 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
,
1388 lower_32_bits(addr
));
1389 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1390 /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
1391 brcmnand_send_cmd(host
, CMD_PAGE_READ
);
1392 brcmnand_waitfunc(mtd
, chip
);
1395 brcmnand_soc_data_bus_prepare(ctrl
->soc
);
1397 for (j
= 0; j
< FC_WORDS
; j
++, buf
++)
1398 *buf
= brcmnand_read_fc(ctrl
, j
);
1400 brcmnand_soc_data_bus_unprepare(ctrl
->soc
);
1404 oob
+= read_oob_from_regs(ctrl
, i
, oob
,
1405 mtd
->oobsize
/ trans
,
1406 host
->hwcfg
.sector_size_1k
);
1409 *err_addr
= brcmnand_read_reg(ctrl
,
1410 BRCMNAND_UNCORR_ADDR
) |
1411 ((u64
)(brcmnand_read_reg(ctrl
,
1412 BRCMNAND_UNCORR_EXT_ADDR
)
1419 *err_addr
= brcmnand_read_reg(ctrl
,
1420 BRCMNAND_CORR_ADDR
) |
1421 ((u64
)(brcmnand_read_reg(ctrl
,
1422 BRCMNAND_CORR_EXT_ADDR
)
1432 static int brcmnand_read(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1433 u64 addr
, unsigned int trans
, u32
*buf
, u8
*oob
)
1435 struct brcmnand_host
*host
= chip
->priv
;
1436 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1440 dev_dbg(ctrl
->dev
, "read %llx -> %p\n", (unsigned long long)addr
, buf
);
1442 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_COUNT
, 0);
1444 if (has_flash_dma(ctrl
) && !oob
&& flash_dma_buf_ok(buf
)) {
1445 err
= brcmnand_dma_trans(host
, addr
, buf
, trans
* FC_BYTES
,
1448 if (mtd_is_bitflip_or_eccerr(err
))
1455 memset(oob
, 0x99, mtd
->oobsize
);
1457 err
= brcmnand_read_by_pio(mtd
, chip
, addr
, trans
, buf
,
1461 if (mtd_is_eccerr(err
)) {
1462 dev_dbg(ctrl
->dev
, "uncorrectable error at 0x%llx\n",
1463 (unsigned long long)err_addr
);
1464 mtd
->ecc_stats
.failed
++;
1465 /* NAND layer expects zero on ECC errors */
1469 if (mtd_is_bitflip(err
)) {
1470 unsigned int corrected
= brcmnand_count_corrected(ctrl
);
1472 dev_dbg(ctrl
->dev
, "corrected error at 0x%llx\n",
1473 (unsigned long long)err_addr
);
1474 mtd
->ecc_stats
.corrected
+= corrected
;
1475 /* Always exceed the software-imposed threshold */
1476 return max(mtd
->bitflip_threshold
, corrected
);
1482 static int brcmnand_read_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1483 uint8_t *buf
, int oob_required
, int page
)
1485 struct brcmnand_host
*host
= chip
->priv
;
1486 u8
*oob
= oob_required
? (u8
*)chip
->oob_poi
: NULL
;
1488 return brcmnand_read(mtd
, chip
, host
->last_addr
,
1489 mtd
->writesize
>> FC_SHIFT
, (u32
*)buf
, oob
);
1492 static int brcmnand_read_page_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1493 uint8_t *buf
, int oob_required
, int page
)
1495 struct brcmnand_host
*host
= chip
->priv
;
1496 u8
*oob
= oob_required
? (u8
*)chip
->oob_poi
: NULL
;
1499 brcmnand_set_ecc_enabled(host
, 0);
1500 ret
= brcmnand_read(mtd
, chip
, host
->last_addr
,
1501 mtd
->writesize
>> FC_SHIFT
, (u32
*)buf
, oob
);
1502 brcmnand_set_ecc_enabled(host
, 1);
1506 static int brcmnand_read_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1509 return brcmnand_read(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1510 mtd
->writesize
>> FC_SHIFT
,
1511 NULL
, (u8
*)chip
->oob_poi
);
1514 static int brcmnand_read_oob_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1517 struct brcmnand_host
*host
= chip
->priv
;
1519 brcmnand_set_ecc_enabled(host
, 0);
1520 brcmnand_read(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1521 mtd
->writesize
>> FC_SHIFT
,
1522 NULL
, (u8
*)chip
->oob_poi
);
1523 brcmnand_set_ecc_enabled(host
, 1);
1527 static int brcmnand_read_subpage(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1528 uint32_t data_offs
, uint32_t readlen
,
1529 uint8_t *bufpoi
, int page
)
1531 struct brcmnand_host
*host
= chip
->priv
;
1533 return brcmnand_read(mtd
, chip
, host
->last_addr
+ data_offs
,
1534 readlen
>> FC_SHIFT
, (u32
*)bufpoi
, NULL
);
1537 static int brcmnand_write(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1538 u64 addr
, const u32
*buf
, u8
*oob
)
1540 struct brcmnand_host
*host
= chip
->priv
;
1541 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1542 unsigned int i
, j
, trans
= mtd
->writesize
>> FC_SHIFT
;
1543 int status
, ret
= 0;
1545 dev_dbg(ctrl
->dev
, "write %llx <- %p\n", (unsigned long long)addr
, buf
);
1547 if (unlikely((unsigned long)buf
& 0x03)) {
1548 dev_warn(ctrl
->dev
, "unaligned buffer: %p\n", buf
);
1549 buf
= (u32
*)((unsigned long)buf
& ~0x03);
1552 brcmnand_wp(mtd
, 0);
1554 for (i
= 0; i
< ctrl
->max_oob
; i
+= 4)
1555 oob_reg_write(ctrl
, i
, 0xffffffff);
1557 if (has_flash_dma(ctrl
) && !oob
&& flash_dma_buf_ok(buf
)) {
1558 if (brcmnand_dma_trans(host
, addr
, (u32
*)buf
,
1559 mtd
->writesize
, CMD_PROGRAM_PAGE
))
1564 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1565 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1566 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1568 for (i
= 0; i
< trans
; i
++, addr
+= FC_BYTES
) {
1569 /* full address MUST be set before populating FC */
1570 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
,
1571 lower_32_bits(addr
));
1572 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1575 brcmnand_soc_data_bus_prepare(ctrl
->soc
);
1577 for (j
= 0; j
< FC_WORDS
; j
++, buf
++)
1578 brcmnand_write_fc(ctrl
, j
, *buf
);
1580 brcmnand_soc_data_bus_unprepare(ctrl
->soc
);
1582 for (j
= 0; j
< FC_WORDS
; j
++)
1583 brcmnand_write_fc(ctrl
, j
, 0xffffffff);
1587 oob
+= write_oob_to_regs(ctrl
, i
, oob
,
1588 mtd
->oobsize
/ trans
,
1589 host
->hwcfg
.sector_size_1k
);
1592 /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
1593 brcmnand_send_cmd(host
, CMD_PROGRAM_PAGE
);
1594 status
= brcmnand_waitfunc(mtd
, chip
);
1596 if (status
& NAND_STATUS_FAIL
) {
1597 dev_info(ctrl
->dev
, "program failed at %llx\n",
1598 (unsigned long long)addr
);
1604 brcmnand_wp(mtd
, 1);
1608 static int brcmnand_write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1609 const uint8_t *buf
, int oob_required
, int page
)
1611 struct brcmnand_host
*host
= chip
->priv
;
1612 void *oob
= oob_required
? chip
->oob_poi
: NULL
;
1614 brcmnand_write(mtd
, chip
, host
->last_addr
, (const u32
*)buf
, oob
);
1618 static int brcmnand_write_page_raw(struct mtd_info
*mtd
,
1619 struct nand_chip
*chip
, const uint8_t *buf
,
1620 int oob_required
, int page
)
1622 struct brcmnand_host
*host
= chip
->priv
;
1623 void *oob
= oob_required
? chip
->oob_poi
: NULL
;
1625 brcmnand_set_ecc_enabled(host
, 0);
1626 brcmnand_write(mtd
, chip
, host
->last_addr
, (const u32
*)buf
, oob
);
1627 brcmnand_set_ecc_enabled(host
, 1);
1631 static int brcmnand_write_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1634 return brcmnand_write(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1635 NULL
, chip
->oob_poi
);
1638 static int brcmnand_write_oob_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1641 struct brcmnand_host
*host
= chip
->priv
;
1644 brcmnand_set_ecc_enabled(host
, 0);
1645 ret
= brcmnand_write(mtd
, chip
, (u64
)page
<< chip
->page_shift
, NULL
,
1646 (u8
*)chip
->oob_poi
);
1647 brcmnand_set_ecc_enabled(host
, 1);
1652 /***********************************************************************
1653 * Per-CS setup (1 NAND device)
1654 ***********************************************************************/
1656 static int brcmnand_set_cfg(struct brcmnand_host
*host
,
1657 struct brcmnand_cfg
*cfg
)
1659 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1660 struct nand_chip
*chip
= &host
->chip
;
1661 u16 cfg_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_CFG
);
1662 u16 cfg_ext_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
1663 BRCMNAND_CS_CFG_EXT
);
1664 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
1665 BRCMNAND_CS_ACC_CONTROL
);
1666 u8 block_size
= 0, page_size
= 0, device_size
= 0;
1669 if (ctrl
->block_sizes
) {
1672 for (i
= 0, found
= 0; ctrl
->block_sizes
[i
]; i
++)
1673 if (ctrl
->block_sizes
[i
] * 1024 == cfg
->block_size
) {
1678 dev_warn(ctrl
->dev
, "invalid block size %u\n",
1683 block_size
= ffs(cfg
->block_size
) - ffs(BRCMNAND_MIN_BLOCKSIZE
);
1686 if (cfg
->block_size
< BRCMNAND_MIN_BLOCKSIZE
|| (ctrl
->max_block_size
&&
1687 cfg
->block_size
> ctrl
->max_block_size
)) {
1688 dev_warn(ctrl
->dev
, "invalid block size %u\n",
1693 if (ctrl
->page_sizes
) {
1696 for (i
= 0, found
= 0; ctrl
->page_sizes
[i
]; i
++)
1697 if (ctrl
->page_sizes
[i
] == cfg
->page_size
) {
1702 dev_warn(ctrl
->dev
, "invalid page size %u\n",
1707 page_size
= ffs(cfg
->page_size
) - ffs(BRCMNAND_MIN_PAGESIZE
);
1710 if (cfg
->page_size
< BRCMNAND_MIN_PAGESIZE
|| (ctrl
->max_page_size
&&
1711 cfg
->page_size
> ctrl
->max_page_size
)) {
1712 dev_warn(ctrl
->dev
, "invalid page size %u\n", cfg
->page_size
);
1716 if (fls64(cfg
->device_size
) < fls64(BRCMNAND_MIN_DEVSIZE
)) {
1717 dev_warn(ctrl
->dev
, "invalid device size 0x%llx\n",
1718 (unsigned long long)cfg
->device_size
);
1721 device_size
= fls64(cfg
->device_size
) - fls64(BRCMNAND_MIN_DEVSIZE
);
1723 tmp
= (cfg
->blk_adr_bytes
<< 8) |
1724 (cfg
->col_adr_bytes
<< 12) |
1725 (cfg
->ful_adr_bytes
<< 16) |
1726 (!!(cfg
->device_width
== 16) << 23) |
1727 (device_size
<< 24);
1728 if (cfg_offs
== cfg_ext_offs
) {
1729 tmp
|= (page_size
<< 20) | (block_size
<< 28);
1730 nand_writereg(ctrl
, cfg_offs
, tmp
);
1732 nand_writereg(ctrl
, cfg_offs
, tmp
);
1733 tmp
= page_size
| (block_size
<< 4);
1734 nand_writereg(ctrl
, cfg_ext_offs
, tmp
);
1737 tmp
= nand_readreg(ctrl
, acc_control_offs
);
1738 tmp
&= ~brcmnand_ecc_level_mask(ctrl
);
1739 tmp
|= cfg
->ecc_level
<< NAND_ACC_CONTROL_ECC_SHIFT
;
1740 tmp
&= ~brcmnand_spare_area_mask(ctrl
);
1741 tmp
|= cfg
->spare_area_size
;
1742 nand_writereg(ctrl
, acc_control_offs
, tmp
);
1744 brcmnand_set_sector_size_1k(host
, cfg
->sector_size_1k
);
1746 /* threshold = ceil(BCH-level * 0.75) */
1747 brcmnand_wr_corr_thresh(host
, DIV_ROUND_UP(chip
->ecc
.strength
* 3, 4));
1752 static void brcmnand_print_cfg(char *buf
, struct brcmnand_cfg
*cfg
)
1755 "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
1756 (unsigned long long)cfg
->device_size
>> 20,
1757 cfg
->block_size
>> 10,
1758 cfg
->page_size
>= 1024 ? cfg
->page_size
>> 10 : cfg
->page_size
,
1759 cfg
->page_size
>= 1024 ? "KiB" : "B",
1760 cfg
->spare_area_size
, cfg
->device_width
);
1762 /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
1763 if (is_hamming_ecc(cfg
))
1764 sprintf(buf
, ", Hamming ECC");
1765 else if (cfg
->sector_size_1k
)
1766 sprintf(buf
, ", BCH-%u (1KiB sector)", cfg
->ecc_level
<< 1);
1768 sprintf(buf
, ", BCH-%u", cfg
->ecc_level
);
1772 * Minimum number of bytes to address a page. Calculated as:
1773 * roundup(log2(size / page-size) / 8)
1775 * NB: the following does not "round up" for non-power-of-2 'size'; but this is
1776 * OK because many other things will break if 'size' is irregular...
1778 static inline int get_blk_adr_bytes(u64 size
, u32 writesize
)
1780 return ALIGN(ilog2(size
) - ilog2(writesize
), 8) >> 3;
1783 static int brcmnand_setup_dev(struct brcmnand_host
*host
)
1785 struct mtd_info
*mtd
= &host
->mtd
;
1786 struct nand_chip
*chip
= &host
->chip
;
1787 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1788 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
1790 u32 offs
, tmp
, oob_sector
;
1793 memset(cfg
, 0, sizeof(*cfg
));
1795 ret
= of_property_read_u32(chip
->flash_node
,
1796 "brcm,nand-oob-sector-size",
1799 /* Use detected size */
1800 cfg
->spare_area_size
= mtd
->oobsize
/
1801 (mtd
->writesize
>> FC_SHIFT
);
1803 cfg
->spare_area_size
= oob_sector
;
1805 if (cfg
->spare_area_size
> ctrl
->max_oob
)
1806 cfg
->spare_area_size
= ctrl
->max_oob
;
1808 * Set oobsize to be consistent with controller's spare_area_size, as
1809 * the rest is inaccessible.
1811 mtd
->oobsize
= cfg
->spare_area_size
* (mtd
->writesize
>> FC_SHIFT
);
1813 cfg
->device_size
= mtd
->size
;
1814 cfg
->block_size
= mtd
->erasesize
;
1815 cfg
->page_size
= mtd
->writesize
;
1816 cfg
->device_width
= (chip
->options
& NAND_BUSWIDTH_16
) ? 16 : 8;
1817 cfg
->col_adr_bytes
= 2;
1818 cfg
->blk_adr_bytes
= get_blk_adr_bytes(mtd
->size
, mtd
->writesize
);
1820 switch (chip
->ecc
.size
) {
1822 if (chip
->ecc
.strength
== 1) /* Hamming */
1823 cfg
->ecc_level
= 15;
1825 cfg
->ecc_level
= chip
->ecc
.strength
;
1826 cfg
->sector_size_1k
= 0;
1829 if (!(ctrl
->features
& BRCMNAND_HAS_1K_SECTORS
)) {
1830 dev_err(ctrl
->dev
, "1KB sectors not supported\n");
1833 if (chip
->ecc
.strength
& 0x1) {
1835 "odd ECC not supported with 1KB sectors\n");
1839 cfg
->ecc_level
= chip
->ecc
.strength
>> 1;
1840 cfg
->sector_size_1k
= 1;
1843 dev_err(ctrl
->dev
, "unsupported ECC size: %d\n",
1848 cfg
->ful_adr_bytes
= cfg
->blk_adr_bytes
;
1849 if (mtd
->writesize
> 512)
1850 cfg
->ful_adr_bytes
+= cfg
->col_adr_bytes
;
1852 cfg
->ful_adr_bytes
+= 1;
1854 ret
= brcmnand_set_cfg(host
, cfg
);
1858 brcmnand_set_ecc_enabled(host
, 1);
1860 brcmnand_print_cfg(msg
, cfg
);
1861 dev_info(ctrl
->dev
, "detected %s\n", msg
);
1863 /* Configure ACC_CONTROL */
1864 offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_ACC_CONTROL
);
1865 tmp
= nand_readreg(ctrl
, offs
);
1866 tmp
&= ~ACC_CONTROL_PARTIAL_PAGE
;
1867 tmp
&= ~ACC_CONTROL_RD_ERASED
;
1868 tmp
&= ~ACC_CONTROL_FAST_PGM_RDIN
;
1869 if (ctrl
->features
& BRCMNAND_HAS_PREFETCH
) {
1871 * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
1874 if (has_flash_dma(ctrl
))
1875 tmp
&= ~ACC_CONTROL_PREFETCH
;
1877 tmp
|= ACC_CONTROL_PREFETCH
;
1879 nand_writereg(ctrl
, offs
, tmp
);
1884 static int brcmnand_init_cs(struct brcmnand_host
*host
)
1886 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1887 struct device_node
*dn
= host
->of_node
;
1888 struct platform_device
*pdev
= host
->pdev
;
1889 struct mtd_info
*mtd
;
1890 struct nand_chip
*chip
;
1892 struct mtd_part_parser_data ppdata
= { .of_node
= dn
};
1894 ret
= of_property_read_u32(dn
, "reg", &host
->cs
);
1896 dev_err(&pdev
->dev
, "can't get chip-select\n");
1903 chip
->flash_node
= dn
;
1906 mtd
->name
= devm_kasprintf(&pdev
->dev
, GFP_KERNEL
, "brcmnand.%d",
1908 mtd
->owner
= THIS_MODULE
;
1909 mtd
->dev
.parent
= &pdev
->dev
;
1911 chip
->IO_ADDR_R
= (void __iomem
*)0xdeadbeef;
1912 chip
->IO_ADDR_W
= (void __iomem
*)0xdeadbeef;
1914 chip
->cmd_ctrl
= brcmnand_cmd_ctrl
;
1915 chip
->cmdfunc
= brcmnand_cmdfunc
;
1916 chip
->waitfunc
= brcmnand_waitfunc
;
1917 chip
->read_byte
= brcmnand_read_byte
;
1918 chip
->read_buf
= brcmnand_read_buf
;
1919 chip
->write_buf
= brcmnand_write_buf
;
1921 chip
->ecc
.mode
= NAND_ECC_HW
;
1922 chip
->ecc
.read_page
= brcmnand_read_page
;
1923 chip
->ecc
.read_subpage
= brcmnand_read_subpage
;
1924 chip
->ecc
.write_page
= brcmnand_write_page
;
1925 chip
->ecc
.read_page_raw
= brcmnand_read_page_raw
;
1926 chip
->ecc
.write_page_raw
= brcmnand_write_page_raw
;
1927 chip
->ecc
.write_oob_raw
= brcmnand_write_oob_raw
;
1928 chip
->ecc
.read_oob_raw
= brcmnand_read_oob_raw
;
1929 chip
->ecc
.read_oob
= brcmnand_read_oob
;
1930 chip
->ecc
.write_oob
= brcmnand_write_oob
;
1932 chip
->controller
= &ctrl
->controller
;
1934 if (nand_scan_ident(mtd
, 1, NULL
))
1937 chip
->options
|= NAND_NO_SUBPAGE_WRITE
;
1939 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
1940 * to/from, and have nand_base pass us a bounce buffer instead, as
1943 chip
->options
|= NAND_USE_BOUNCE_BUFFER
;
1945 if (of_get_nand_on_flash_bbt(dn
))
1946 chip
->bbt_options
|= NAND_BBT_USE_FLASH
| NAND_BBT_NO_OOB
;
1948 if (brcmnand_setup_dev(host
))
1951 chip
->ecc
.size
= host
->hwcfg
.sector_size_1k
? 1024 : 512;
1952 /* only use our internal HW threshold */
1953 mtd
->bitflip_threshold
= 1;
1955 chip
->ecc
.layout
= brcmstb_choose_ecc_layout(host
);
1956 if (!chip
->ecc
.layout
)
1959 if (nand_scan_tail(mtd
))
1962 return mtd_device_parse_register(mtd
, NULL
, &ppdata
, NULL
, 0);
1965 static void brcmnand_save_restore_cs_config(struct brcmnand_host
*host
,
1968 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1969 u16 cfg_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_CFG
);
1970 u16 cfg_ext_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
1971 BRCMNAND_CS_CFG_EXT
);
1972 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
1973 BRCMNAND_CS_ACC_CONTROL
);
1974 u16 t1_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_TIMING1
);
1975 u16 t2_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_TIMING2
);
1978 nand_writereg(ctrl
, cfg_offs
, host
->hwcfg
.config
);
1979 if (cfg_offs
!= cfg_ext_offs
)
1980 nand_writereg(ctrl
, cfg_ext_offs
,
1981 host
->hwcfg
.config_ext
);
1982 nand_writereg(ctrl
, acc_control_offs
, host
->hwcfg
.acc_control
);
1983 nand_writereg(ctrl
, t1_offs
, host
->hwcfg
.timing_1
);
1984 nand_writereg(ctrl
, t2_offs
, host
->hwcfg
.timing_2
);
1986 host
->hwcfg
.config
= nand_readreg(ctrl
, cfg_offs
);
1987 if (cfg_offs
!= cfg_ext_offs
)
1988 host
->hwcfg
.config_ext
=
1989 nand_readreg(ctrl
, cfg_ext_offs
);
1990 host
->hwcfg
.acc_control
= nand_readreg(ctrl
, acc_control_offs
);
1991 host
->hwcfg
.timing_1
= nand_readreg(ctrl
, t1_offs
);
1992 host
->hwcfg
.timing_2
= nand_readreg(ctrl
, t2_offs
);
1996 static int brcmnand_suspend(struct device
*dev
)
1998 struct brcmnand_controller
*ctrl
= dev_get_drvdata(dev
);
1999 struct brcmnand_host
*host
;
2001 list_for_each_entry(host
, &ctrl
->host_list
, node
)
2002 brcmnand_save_restore_cs_config(host
, 0);
2004 ctrl
->nand_cs_nand_select
= brcmnand_read_reg(ctrl
, BRCMNAND_CS_SELECT
);
2005 ctrl
->nand_cs_nand_xor
= brcmnand_read_reg(ctrl
, BRCMNAND_CS_XOR
);
2006 ctrl
->corr_stat_threshold
=
2007 brcmnand_read_reg(ctrl
, BRCMNAND_CORR_THRESHOLD
);
2009 if (has_flash_dma(ctrl
))
2010 ctrl
->flash_dma_mode
= flash_dma_readl(ctrl
, FLASH_DMA_MODE
);
2015 static int brcmnand_resume(struct device
*dev
)
2017 struct brcmnand_controller
*ctrl
= dev_get_drvdata(dev
);
2018 struct brcmnand_host
*host
;
2020 if (has_flash_dma(ctrl
)) {
2021 flash_dma_writel(ctrl
, FLASH_DMA_MODE
, ctrl
->flash_dma_mode
);
2022 flash_dma_writel(ctrl
, FLASH_DMA_ERROR_STATUS
, 0);
2025 brcmnand_write_reg(ctrl
, BRCMNAND_CS_SELECT
, ctrl
->nand_cs_nand_select
);
2026 brcmnand_write_reg(ctrl
, BRCMNAND_CS_XOR
, ctrl
->nand_cs_nand_xor
);
2027 brcmnand_write_reg(ctrl
, BRCMNAND_CORR_THRESHOLD
,
2028 ctrl
->corr_stat_threshold
);
2030 /* Clear/re-enable interrupt */
2031 ctrl
->soc
->ctlrdy_ack(ctrl
->soc
);
2032 ctrl
->soc
->ctlrdy_set_enabled(ctrl
->soc
, true);
2035 list_for_each_entry(host
, &ctrl
->host_list
, node
) {
2036 struct mtd_info
*mtd
= &host
->mtd
;
2037 struct nand_chip
*chip
= mtd
->priv
;
2039 brcmnand_save_restore_cs_config(host
, 1);
2041 /* Reset the chip, required by some chips after power-up */
2042 chip
->cmdfunc(mtd
, NAND_CMD_RESET
, -1, -1);
2048 const struct dev_pm_ops brcmnand_pm_ops
= {
2049 .suspend
= brcmnand_suspend
,
2050 .resume
= brcmnand_resume
,
2052 EXPORT_SYMBOL_GPL(brcmnand_pm_ops
);
2054 static const struct of_device_id brcmnand_of_match
[] = {
2055 { .compatible
= "brcm,brcmnand-v4.0" },
2056 { .compatible
= "brcm,brcmnand-v5.0" },
2057 { .compatible
= "brcm,brcmnand-v6.0" },
2058 { .compatible
= "brcm,brcmnand-v6.1" },
2059 { .compatible
= "brcm,brcmnand-v7.0" },
2060 { .compatible
= "brcm,brcmnand-v7.1" },
2063 MODULE_DEVICE_TABLE(of
, brcmnand_of_match
);
2065 /***********************************************************************
2066 * Platform driver setup (per controller)
2067 ***********************************************************************/
2069 int brcmnand_probe(struct platform_device
*pdev
, struct brcmnand_soc
*soc
)
2071 struct device
*dev
= &pdev
->dev
;
2072 struct device_node
*dn
= dev
->of_node
, *child
;
2073 struct brcmnand_controller
*ctrl
;
2074 struct resource
*res
;
2077 /* We only support device-tree instantiation */
2081 if (!of_match_node(brcmnand_of_match
, dn
))
2084 ctrl
= devm_kzalloc(dev
, sizeof(*ctrl
), GFP_KERNEL
);
2088 dev_set_drvdata(dev
, ctrl
);
2091 init_completion(&ctrl
->done
);
2092 init_completion(&ctrl
->dma_done
);
2093 spin_lock_init(&ctrl
->controller
.lock
);
2094 init_waitqueue_head(&ctrl
->controller
.wq
);
2095 INIT_LIST_HEAD(&ctrl
->host_list
);
2097 /* NAND register range */
2098 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2099 ctrl
->nand_base
= devm_ioremap_resource(dev
, res
);
2100 if (IS_ERR(ctrl
->nand_base
))
2101 return PTR_ERR(ctrl
->nand_base
);
2103 /* Initialize NAND revision */
2104 ret
= brcmnand_revision_init(ctrl
);
2109 * Most chips have this cache at a fixed offset within 'nand' block.
2110 * Some must specify this region separately.
2112 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "nand-cache");
2114 ctrl
->nand_fc
= devm_ioremap_resource(dev
, res
);
2115 if (IS_ERR(ctrl
->nand_fc
))
2116 return PTR_ERR(ctrl
->nand_fc
);
2118 ctrl
->nand_fc
= ctrl
->nand_base
+
2119 ctrl
->reg_offsets
[BRCMNAND_FC_BASE
];
2123 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "flash-dma");
2125 ctrl
->flash_dma_base
= devm_ioremap_resource(dev
, res
);
2126 if (IS_ERR(ctrl
->flash_dma_base
))
2127 return PTR_ERR(ctrl
->flash_dma_base
);
2129 flash_dma_writel(ctrl
, FLASH_DMA_MODE
, 1); /* linked-list */
2130 flash_dma_writel(ctrl
, FLASH_DMA_ERROR_STATUS
, 0);
2132 /* Allocate descriptor(s) */
2133 ctrl
->dma_desc
= dmam_alloc_coherent(dev
,
2134 sizeof(*ctrl
->dma_desc
),
2135 &ctrl
->dma_pa
, GFP_KERNEL
);
2136 if (!ctrl
->dma_desc
)
2139 ctrl
->dma_irq
= platform_get_irq(pdev
, 1);
2140 if ((int)ctrl
->dma_irq
< 0) {
2141 dev_err(dev
, "missing FLASH_DMA IRQ\n");
2145 ret
= devm_request_irq(dev
, ctrl
->dma_irq
,
2146 brcmnand_dma_irq
, 0, DRV_NAME
,
2149 dev_err(dev
, "can't allocate IRQ %d: error %d\n",
2150 ctrl
->dma_irq
, ret
);
2154 dev_info(dev
, "enabling FLASH_DMA\n");
2157 /* Disable automatic device ID config, direct addressing */
2158 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_SELECT
,
2159 CS_SELECT_AUTO_DEVICE_ID_CFG
| 0xff, 0, 0);
2160 /* Disable XOR addressing */
2161 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_XOR
, 0xff, 0, 0);
2163 if (ctrl
->features
& BRCMNAND_HAS_WP
) {
2164 /* Permanently disable write protection */
2166 brcmnand_set_wp(ctrl
, false);
2172 ctrl
->irq
= platform_get_irq(pdev
, 0);
2173 if ((int)ctrl
->irq
< 0) {
2174 dev_err(dev
, "no IRQ defined\n");
2179 * Some SoCs integrate this controller (e.g., its interrupt bits) in
2185 ret
= devm_request_irq(dev
, ctrl
->irq
, brcmnand_irq
, 0,
2188 /* Enable interrupt */
2189 ctrl
->soc
->ctlrdy_ack(ctrl
->soc
);
2190 ctrl
->soc
->ctlrdy_set_enabled(ctrl
->soc
, true);
2192 /* Use standard interrupt infrastructure */
2193 ret
= devm_request_irq(dev
, ctrl
->irq
, brcmnand_ctlrdy_irq
, 0,
2197 dev_err(dev
, "can't allocate IRQ %d: error %d\n",
2202 for_each_available_child_of_node(dn
, child
) {
2203 if (of_device_is_compatible(child
, "brcm,nandcs")) {
2204 struct brcmnand_host
*host
;
2206 host
= devm_kzalloc(dev
, sizeof(*host
), GFP_KERNEL
);
2211 host
->of_node
= child
;
2213 ret
= brcmnand_init_cs(host
);
2215 continue; /* Try all chip-selects */
2217 list_add_tail(&host
->node
, &ctrl
->host_list
);
2221 /* No chip-selects could initialize properly */
2222 if (list_empty(&ctrl
->host_list
))
2227 EXPORT_SYMBOL_GPL(brcmnand_probe
);
2229 int brcmnand_remove(struct platform_device
*pdev
)
2231 struct brcmnand_controller
*ctrl
= dev_get_drvdata(&pdev
->dev
);
2232 struct brcmnand_host
*host
;
2234 list_for_each_entry(host
, &ctrl
->host_list
, node
)
2235 nand_release(&host
->mtd
);
2237 dev_set_drvdata(&pdev
->dev
, NULL
);
2241 EXPORT_SYMBOL_GPL(brcmnand_remove
);
2243 MODULE_LICENSE("GPL v2");
2244 MODULE_AUTHOR("Kevin Cernekee");
2245 MODULE_AUTHOR("Brian Norris");
2246 MODULE_DESCRIPTION("NAND driver for Broadcom chips");
2247 MODULE_ALIAS("platform:brcmnand");