2 * Copyright © 2010-2015 Broadcom Corporation
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
14 #include <linux/clk.h>
15 #include <linux/version.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/platform_device.h>
21 #include <linux/err.h>
22 #include <linux/completion.h>
23 #include <linux/interrupt.h>
24 #include <linux/spinlock.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/ioport.h>
27 #include <linux/bug.h>
28 #include <linux/kernel.h>
29 #include <linux/bitops.h>
31 #include <linux/mtd/mtd.h>
32 #include <linux/mtd/nand.h>
33 #include <linux/mtd/partitions.h>
35 #include <linux/of_platform.h>
36 #include <linux/slab.h>
37 #include <linux/list.h>
38 #include <linux/log2.h>
43 * This flag controls if WP stays on between erase/write commands to mitigate
44 * flash corruption due to power glitches. Values:
45 * 0: NAND_WP is not used or not available
46 * 1: NAND_WP is set by default, cleared for erase/write operations
47 * 2: NAND_WP is always cleared
50 module_param(wp_on
, int, 0444);
52 /***********************************************************************
54 ***********************************************************************/
56 #define DRV_NAME "brcmnand"
59 #define CMD_PAGE_READ 0x01
60 #define CMD_SPARE_AREA_READ 0x02
61 #define CMD_STATUS_READ 0x03
62 #define CMD_PROGRAM_PAGE 0x04
63 #define CMD_PROGRAM_SPARE_AREA 0x05
64 #define CMD_COPY_BACK 0x06
65 #define CMD_DEVICE_ID_READ 0x07
66 #define CMD_BLOCK_ERASE 0x08
67 #define CMD_FLASH_RESET 0x09
68 #define CMD_BLOCKS_LOCK 0x0a
69 #define CMD_BLOCKS_LOCK_DOWN 0x0b
70 #define CMD_BLOCKS_UNLOCK 0x0c
71 #define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
72 #define CMD_PARAMETER_READ 0x0e
73 #define CMD_PARAMETER_CHANGE_COL 0x0f
74 #define CMD_LOW_LEVEL_OP 0x10
76 struct brcm_nand_dma_desc
{
91 /* Bitfields for brcm_nand_dma_desc::status_valid */
92 #define FLASH_DMA_ECC_ERROR (1 << 8)
93 #define FLASH_DMA_CORR_ERROR (1 << 9)
95 /* 512B flash cache in the NAND controller HW */
98 #define FC_WORDS (FC_BYTES >> 2)
100 #define BRCMNAND_MIN_PAGESIZE 512
101 #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
102 #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
104 /* Controller feature flags */
106 BRCMNAND_HAS_1K_SECTORS
= BIT(0),
107 BRCMNAND_HAS_PREFETCH
= BIT(1),
108 BRCMNAND_HAS_CACHE_MODE
= BIT(2),
109 BRCMNAND_HAS_WP
= BIT(3),
112 struct brcmnand_controller
{
114 struct nand_hw_control controller
;
115 void __iomem
*nand_base
;
116 void __iomem
*nand_fc
; /* flash cache */
117 void __iomem
*flash_dma_base
;
119 unsigned int dma_irq
;
122 /* Some SoCs provide custom interrupt status register(s) */
123 struct brcmnand_soc
*soc
;
125 /* Some SoCs have a gateable clock for the controller */
130 struct completion done
;
131 struct completion dma_done
;
133 /* List of NAND hosts (one for each chip-select) */
134 struct list_head host_list
;
136 struct brcm_nand_dma_desc
*dma_desc
;
139 /* in-memory cache of the FLASH_CACHE, used only for some commands */
140 u8 flash_cache
[FC_BYTES
];
142 /* Controller revision details */
143 const u16
*reg_offsets
;
144 unsigned int reg_spacing
; /* between CS1, CS2, ... regs */
145 const u8
*cs_offsets
; /* within each chip-select */
146 const u8
*cs0_offsets
; /* within CS0, if different */
147 unsigned int max_block_size
;
148 const unsigned int *block_sizes
;
149 unsigned int max_page_size
;
150 const unsigned int *page_sizes
;
151 unsigned int max_oob
;
154 /* for low-power standby/resume only */
155 u32 nand_cs_nand_select
;
156 u32 nand_cs_nand_xor
;
157 u32 corr_stat_threshold
;
161 struct brcmnand_cfg
{
163 unsigned int block_size
;
164 unsigned int page_size
;
165 unsigned int spare_area_size
;
166 unsigned int device_width
;
167 unsigned int col_adr_bytes
;
168 unsigned int blk_adr_bytes
;
169 unsigned int ful_adr_bytes
;
170 unsigned int sector_size_1k
;
171 unsigned int ecc_level
;
172 /* use for low-power standby/resume only */
180 struct brcmnand_host
{
181 struct list_head node
;
183 struct nand_chip chip
;
184 struct platform_device
*pdev
;
187 unsigned int last_cmd
;
188 unsigned int last_byte
;
190 struct brcmnand_cfg hwcfg
;
191 struct brcmnand_controller
*ctrl
;
195 BRCMNAND_CMD_START
= 0,
196 BRCMNAND_CMD_EXT_ADDRESS
,
197 BRCMNAND_CMD_ADDRESS
,
198 BRCMNAND_INTFC_STATUS
,
203 BRCMNAND_CS1_BASE
, /* CS1 regs, if non-contiguous */
204 BRCMNAND_CORR_THRESHOLD
,
205 BRCMNAND_CORR_THRESHOLD_EXT
,
206 BRCMNAND_UNCORR_COUNT
,
208 BRCMNAND_CORR_EXT_ADDR
,
210 BRCMNAND_UNCORR_EXT_ADDR
,
211 BRCMNAND_UNCORR_ADDR
,
216 BRCMNAND_OOB_READ_BASE
,
217 BRCMNAND_OOB_READ_10_BASE
, /* offset 0x10, if non-contiguous */
218 BRCMNAND_OOB_WRITE_BASE
,
219 BRCMNAND_OOB_WRITE_10_BASE
, /* offset 0x10, if non-contiguous */
224 static const u16 brcmnand_regs_v40
[] = {
225 [BRCMNAND_CMD_START
] = 0x04,
226 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
227 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
228 [BRCMNAND_INTFC_STATUS
] = 0x6c,
229 [BRCMNAND_CS_SELECT
] = 0x14,
230 [BRCMNAND_CS_XOR
] = 0x18,
231 [BRCMNAND_LL_OP
] = 0x178,
232 [BRCMNAND_CS0_BASE
] = 0x40,
233 [BRCMNAND_CS1_BASE
] = 0xd0,
234 [BRCMNAND_CORR_THRESHOLD
] = 0x84,
235 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0,
236 [BRCMNAND_UNCORR_COUNT
] = 0,
237 [BRCMNAND_CORR_COUNT
] = 0,
238 [BRCMNAND_CORR_EXT_ADDR
] = 0x70,
239 [BRCMNAND_CORR_ADDR
] = 0x74,
240 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x78,
241 [BRCMNAND_UNCORR_ADDR
] = 0x7c,
242 [BRCMNAND_SEMAPHORE
] = 0x58,
243 [BRCMNAND_ID
] = 0x60,
244 [BRCMNAND_ID_EXT
] = 0x64,
245 [BRCMNAND_LL_RDATA
] = 0x17c,
246 [BRCMNAND_OOB_READ_BASE
] = 0x20,
247 [BRCMNAND_OOB_READ_10_BASE
] = 0x130,
248 [BRCMNAND_OOB_WRITE_BASE
] = 0x30,
249 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
250 [BRCMNAND_FC_BASE
] = 0x200,
254 static const u16 brcmnand_regs_v50
[] = {
255 [BRCMNAND_CMD_START
] = 0x04,
256 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
257 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
258 [BRCMNAND_INTFC_STATUS
] = 0x6c,
259 [BRCMNAND_CS_SELECT
] = 0x14,
260 [BRCMNAND_CS_XOR
] = 0x18,
261 [BRCMNAND_LL_OP
] = 0x178,
262 [BRCMNAND_CS0_BASE
] = 0x40,
263 [BRCMNAND_CS1_BASE
] = 0xd0,
264 [BRCMNAND_CORR_THRESHOLD
] = 0x84,
265 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0,
266 [BRCMNAND_UNCORR_COUNT
] = 0,
267 [BRCMNAND_CORR_COUNT
] = 0,
268 [BRCMNAND_CORR_EXT_ADDR
] = 0x70,
269 [BRCMNAND_CORR_ADDR
] = 0x74,
270 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x78,
271 [BRCMNAND_UNCORR_ADDR
] = 0x7c,
272 [BRCMNAND_SEMAPHORE
] = 0x58,
273 [BRCMNAND_ID
] = 0x60,
274 [BRCMNAND_ID_EXT
] = 0x64,
275 [BRCMNAND_LL_RDATA
] = 0x17c,
276 [BRCMNAND_OOB_READ_BASE
] = 0x20,
277 [BRCMNAND_OOB_READ_10_BASE
] = 0x130,
278 [BRCMNAND_OOB_WRITE_BASE
] = 0x30,
279 [BRCMNAND_OOB_WRITE_10_BASE
] = 0x140,
280 [BRCMNAND_FC_BASE
] = 0x200,
283 /* BRCMNAND v6.0 - v7.1 */
284 static const u16 brcmnand_regs_v60
[] = {
285 [BRCMNAND_CMD_START
] = 0x04,
286 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
287 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
288 [BRCMNAND_INTFC_STATUS
] = 0x14,
289 [BRCMNAND_CS_SELECT
] = 0x18,
290 [BRCMNAND_CS_XOR
] = 0x1c,
291 [BRCMNAND_LL_OP
] = 0x20,
292 [BRCMNAND_CS0_BASE
] = 0x50,
293 [BRCMNAND_CS1_BASE
] = 0,
294 [BRCMNAND_CORR_THRESHOLD
] = 0xc0,
295 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0xc4,
296 [BRCMNAND_UNCORR_COUNT
] = 0xfc,
297 [BRCMNAND_CORR_COUNT
] = 0x100,
298 [BRCMNAND_CORR_EXT_ADDR
] = 0x10c,
299 [BRCMNAND_CORR_ADDR
] = 0x110,
300 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x114,
301 [BRCMNAND_UNCORR_ADDR
] = 0x118,
302 [BRCMNAND_SEMAPHORE
] = 0x150,
303 [BRCMNAND_ID
] = 0x194,
304 [BRCMNAND_ID_EXT
] = 0x198,
305 [BRCMNAND_LL_RDATA
] = 0x19c,
306 [BRCMNAND_OOB_READ_BASE
] = 0x200,
307 [BRCMNAND_OOB_READ_10_BASE
] = 0,
308 [BRCMNAND_OOB_WRITE_BASE
] = 0x280,
309 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
310 [BRCMNAND_FC_BASE
] = 0x400,
314 static const u16 brcmnand_regs_v71
[] = {
315 [BRCMNAND_CMD_START
] = 0x04,
316 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
317 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
318 [BRCMNAND_INTFC_STATUS
] = 0x14,
319 [BRCMNAND_CS_SELECT
] = 0x18,
320 [BRCMNAND_CS_XOR
] = 0x1c,
321 [BRCMNAND_LL_OP
] = 0x20,
322 [BRCMNAND_CS0_BASE
] = 0x50,
323 [BRCMNAND_CS1_BASE
] = 0,
324 [BRCMNAND_CORR_THRESHOLD
] = 0xdc,
325 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0xe0,
326 [BRCMNAND_UNCORR_COUNT
] = 0xfc,
327 [BRCMNAND_CORR_COUNT
] = 0x100,
328 [BRCMNAND_CORR_EXT_ADDR
] = 0x10c,
329 [BRCMNAND_CORR_ADDR
] = 0x110,
330 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x114,
331 [BRCMNAND_UNCORR_ADDR
] = 0x118,
332 [BRCMNAND_SEMAPHORE
] = 0x150,
333 [BRCMNAND_ID
] = 0x194,
334 [BRCMNAND_ID_EXT
] = 0x198,
335 [BRCMNAND_LL_RDATA
] = 0x19c,
336 [BRCMNAND_OOB_READ_BASE
] = 0x200,
337 [BRCMNAND_OOB_READ_10_BASE
] = 0,
338 [BRCMNAND_OOB_WRITE_BASE
] = 0x280,
339 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
340 [BRCMNAND_FC_BASE
] = 0x400,
343 enum brcmnand_cs_reg
{
344 BRCMNAND_CS_CFG_EXT
= 0,
346 BRCMNAND_CS_ACC_CONTROL
,
351 /* Per chip-select offsets for v7.1 */
352 static const u8 brcmnand_cs_offsets_v71
[] = {
353 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
354 [BRCMNAND_CS_CFG_EXT
] = 0x04,
355 [BRCMNAND_CS_CFG
] = 0x08,
356 [BRCMNAND_CS_TIMING1
] = 0x0c,
357 [BRCMNAND_CS_TIMING2
] = 0x10,
360 /* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
361 static const u8 brcmnand_cs_offsets
[] = {
362 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
363 [BRCMNAND_CS_CFG_EXT
] = 0x04,
364 [BRCMNAND_CS_CFG
] = 0x04,
365 [BRCMNAND_CS_TIMING1
] = 0x08,
366 [BRCMNAND_CS_TIMING2
] = 0x0c,
369 /* Per chip-select offset for <= v5.0 on CS0 only */
370 static const u8 brcmnand_cs_offsets_cs0
[] = {
371 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
372 [BRCMNAND_CS_CFG_EXT
] = 0x08,
373 [BRCMNAND_CS_CFG
] = 0x08,
374 [BRCMNAND_CS_TIMING1
] = 0x10,
375 [BRCMNAND_CS_TIMING2
] = 0x14,
379 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
380 * one config register, but once the bitfields overflowed, newer controllers
381 * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
384 CFG_BLK_ADR_BYTES_SHIFT
= 8,
385 CFG_COL_ADR_BYTES_SHIFT
= 12,
386 CFG_FUL_ADR_BYTES_SHIFT
= 16,
387 CFG_BUS_WIDTH_SHIFT
= 23,
388 CFG_BUS_WIDTH
= BIT(CFG_BUS_WIDTH_SHIFT
),
389 CFG_DEVICE_SIZE_SHIFT
= 24,
391 /* Only for pre-v7.1 (with no CFG_EXT register) */
392 CFG_PAGE_SIZE_SHIFT
= 20,
393 CFG_BLK_SIZE_SHIFT
= 28,
395 /* Only for v7.1+ (with CFG_EXT register) */
396 CFG_EXT_PAGE_SIZE_SHIFT
= 0,
397 CFG_EXT_BLK_SIZE_SHIFT
= 4,
400 /* BRCMNAND_INTFC_STATUS */
402 INTFC_FLASH_STATUS
= GENMASK(7, 0),
404 INTFC_ERASED
= BIT(27),
405 INTFC_OOB_VALID
= BIT(28),
406 INTFC_CACHE_VALID
= BIT(29),
407 INTFC_FLASH_READY
= BIT(30),
408 INTFC_CTLR_READY
= BIT(31),
411 static inline u32
nand_readreg(struct brcmnand_controller
*ctrl
, u32 offs
)
413 return brcmnand_readl(ctrl
->nand_base
+ offs
);
416 static inline void nand_writereg(struct brcmnand_controller
*ctrl
, u32 offs
,
419 brcmnand_writel(val
, ctrl
->nand_base
+ offs
);
422 static int brcmnand_revision_init(struct brcmnand_controller
*ctrl
)
424 static const unsigned int block_sizes_v6
[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
425 static const unsigned int block_sizes_v4
[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
426 static const unsigned int page_sizes
[] = { 512, 2048, 4096, 8192, 0 };
428 ctrl
->nand_version
= nand_readreg(ctrl
, 0) & 0xffff;
430 /* Only support v4.0+? */
431 if (ctrl
->nand_version
< 0x0400) {
432 dev_err(ctrl
->dev
, "version %#x not supported\n",
437 /* Register offsets */
438 if (ctrl
->nand_version
>= 0x0701)
439 ctrl
->reg_offsets
= brcmnand_regs_v71
;
440 else if (ctrl
->nand_version
>= 0x0600)
441 ctrl
->reg_offsets
= brcmnand_regs_v60
;
442 else if (ctrl
->nand_version
>= 0x0500)
443 ctrl
->reg_offsets
= brcmnand_regs_v50
;
444 else if (ctrl
->nand_version
>= 0x0400)
445 ctrl
->reg_offsets
= brcmnand_regs_v40
;
447 /* Chip-select stride */
448 if (ctrl
->nand_version
>= 0x0701)
449 ctrl
->reg_spacing
= 0x14;
451 ctrl
->reg_spacing
= 0x10;
453 /* Per chip-select registers */
454 if (ctrl
->nand_version
>= 0x0701) {
455 ctrl
->cs_offsets
= brcmnand_cs_offsets_v71
;
457 ctrl
->cs_offsets
= brcmnand_cs_offsets
;
459 /* v5.0 and earlier has a different CS0 offset layout */
460 if (ctrl
->nand_version
<= 0x0500)
461 ctrl
->cs0_offsets
= brcmnand_cs_offsets_cs0
;
464 /* Page / block sizes */
465 if (ctrl
->nand_version
>= 0x0701) {
466 /* >= v7.1 use nice power-of-2 values! */
467 ctrl
->max_page_size
= 16 * 1024;
468 ctrl
->max_block_size
= 2 * 1024 * 1024;
470 ctrl
->page_sizes
= page_sizes
;
471 if (ctrl
->nand_version
>= 0x0600)
472 ctrl
->block_sizes
= block_sizes_v6
;
474 ctrl
->block_sizes
= block_sizes_v4
;
476 if (ctrl
->nand_version
< 0x0400) {
477 ctrl
->max_page_size
= 4096;
478 ctrl
->max_block_size
= 512 * 1024;
482 /* Maximum spare area sector size (per 512B) */
483 if (ctrl
->nand_version
>= 0x0600)
485 else if (ctrl
->nand_version
>= 0x0500)
490 /* v6.0 and newer (except v6.1) have prefetch support */
491 if (ctrl
->nand_version
>= 0x0600 && ctrl
->nand_version
!= 0x0601)
492 ctrl
->features
|= BRCMNAND_HAS_PREFETCH
;
495 * v6.x has cache mode, but it's implemented differently. Ignore it for
498 if (ctrl
->nand_version
>= 0x0700)
499 ctrl
->features
|= BRCMNAND_HAS_CACHE_MODE
;
501 if (ctrl
->nand_version
>= 0x0500)
502 ctrl
->features
|= BRCMNAND_HAS_1K_SECTORS
;
504 if (ctrl
->nand_version
>= 0x0700)
505 ctrl
->features
|= BRCMNAND_HAS_WP
;
506 else if (of_property_read_bool(ctrl
->dev
->of_node
, "brcm,nand-has-wp"))
507 ctrl
->features
|= BRCMNAND_HAS_WP
;
512 static inline u32
brcmnand_read_reg(struct brcmnand_controller
*ctrl
,
513 enum brcmnand_reg reg
)
515 u16 offs
= ctrl
->reg_offsets
[reg
];
518 return nand_readreg(ctrl
, offs
);
523 static inline void brcmnand_write_reg(struct brcmnand_controller
*ctrl
,
524 enum brcmnand_reg reg
, u32 val
)
526 u16 offs
= ctrl
->reg_offsets
[reg
];
529 nand_writereg(ctrl
, offs
, val
);
532 static inline void brcmnand_rmw_reg(struct brcmnand_controller
*ctrl
,
533 enum brcmnand_reg reg
, u32 mask
, unsigned
536 u32 tmp
= brcmnand_read_reg(ctrl
, reg
);
540 brcmnand_write_reg(ctrl
, reg
, tmp
);
543 static inline u32
brcmnand_read_fc(struct brcmnand_controller
*ctrl
, int word
)
545 return __raw_readl(ctrl
->nand_fc
+ word
* 4);
548 static inline void brcmnand_write_fc(struct brcmnand_controller
*ctrl
,
551 __raw_writel(val
, ctrl
->nand_fc
+ word
* 4);
554 static inline u16
brcmnand_cs_offset(struct brcmnand_controller
*ctrl
, int cs
,
555 enum brcmnand_cs_reg reg
)
557 u16 offs_cs0
= ctrl
->reg_offsets
[BRCMNAND_CS0_BASE
];
558 u16 offs_cs1
= ctrl
->reg_offsets
[BRCMNAND_CS1_BASE
];
561 if (cs
== 0 && ctrl
->cs0_offsets
)
562 cs_offs
= ctrl
->cs0_offsets
[reg
];
564 cs_offs
= ctrl
->cs_offsets
[reg
];
567 return offs_cs1
+ (cs
- 1) * ctrl
->reg_spacing
+ cs_offs
;
569 return offs_cs0
+ cs
* ctrl
->reg_spacing
+ cs_offs
;
572 static inline u32
brcmnand_count_corrected(struct brcmnand_controller
*ctrl
)
574 if (ctrl
->nand_version
< 0x0600)
576 return brcmnand_read_reg(ctrl
, BRCMNAND_CORR_COUNT
);
579 static void brcmnand_wr_corr_thresh(struct brcmnand_host
*host
, u8 val
)
581 struct brcmnand_controller
*ctrl
= host
->ctrl
;
582 unsigned int shift
= 0, bits
;
583 enum brcmnand_reg reg
= BRCMNAND_CORR_THRESHOLD
;
586 if (ctrl
->nand_version
>= 0x0600)
588 else if (ctrl
->nand_version
>= 0x0500)
593 if (ctrl
->nand_version
>= 0x0600) {
595 reg
= BRCMNAND_CORR_THRESHOLD_EXT
;
596 shift
= (cs
% 5) * bits
;
598 brcmnand_rmw_reg(ctrl
, reg
, (bits
- 1) << shift
, shift
, val
);
601 static inline int brcmnand_cmd_shift(struct brcmnand_controller
*ctrl
)
603 if (ctrl
->nand_version
< 0x0602)
608 /***********************************************************************
609 * NAND ACC CONTROL bitfield
611 * Some bits have remained constant throughout hardware revision, while
612 * others have shifted around.
613 ***********************************************************************/
615 /* Constant for all versions (where supported) */
617 /* See BRCMNAND_HAS_CACHE_MODE */
618 ACC_CONTROL_CACHE_MODE
= BIT(22),
620 /* See BRCMNAND_HAS_PREFETCH */
621 ACC_CONTROL_PREFETCH
= BIT(23),
623 ACC_CONTROL_PAGE_HIT
= BIT(24),
624 ACC_CONTROL_WR_PREEMPT
= BIT(25),
625 ACC_CONTROL_PARTIAL_PAGE
= BIT(26),
626 ACC_CONTROL_RD_ERASED
= BIT(27),
627 ACC_CONTROL_FAST_PGM_RDIN
= BIT(28),
628 ACC_CONTROL_WR_ECC
= BIT(30),
629 ACC_CONTROL_RD_ECC
= BIT(31),
632 static inline u32
brcmnand_spare_area_mask(struct brcmnand_controller
*ctrl
)
634 if (ctrl
->nand_version
>= 0x0600)
635 return GENMASK(6, 0);
637 return GENMASK(5, 0);
640 #define NAND_ACC_CONTROL_ECC_SHIFT 16
642 static inline u32
brcmnand_ecc_level_mask(struct brcmnand_controller
*ctrl
)
644 u32 mask
= (ctrl
->nand_version
>= 0x0600) ? 0x1f : 0x0f;
646 return mask
<< NAND_ACC_CONTROL_ECC_SHIFT
;
649 static void brcmnand_set_ecc_enabled(struct brcmnand_host
*host
, int en
)
651 struct brcmnand_controller
*ctrl
= host
->ctrl
;
652 u16 offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_ACC_CONTROL
);
653 u32 acc_control
= nand_readreg(ctrl
, offs
);
654 u32 ecc_flags
= ACC_CONTROL_WR_ECC
| ACC_CONTROL_RD_ECC
;
657 acc_control
|= ecc_flags
; /* enable RD/WR ECC */
658 acc_control
|= host
->hwcfg
.ecc_level
659 << NAND_ACC_CONTROL_ECC_SHIFT
;
661 acc_control
&= ~ecc_flags
; /* disable RD/WR ECC */
662 acc_control
&= ~brcmnand_ecc_level_mask(ctrl
);
665 nand_writereg(ctrl
, offs
, acc_control
);
668 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller
*ctrl
)
670 if (ctrl
->nand_version
>= 0x0600)
672 else if (ctrl
->nand_version
>= 0x0500)
678 static int brcmnand_get_sector_size_1k(struct brcmnand_host
*host
)
680 struct brcmnand_controller
*ctrl
= host
->ctrl
;
681 int shift
= brcmnand_sector_1k_shift(ctrl
);
682 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
683 BRCMNAND_CS_ACC_CONTROL
);
688 return (nand_readreg(ctrl
, acc_control_offs
) >> shift
) & 0x1;
691 static void brcmnand_set_sector_size_1k(struct brcmnand_host
*host
, int val
)
693 struct brcmnand_controller
*ctrl
= host
->ctrl
;
694 int shift
= brcmnand_sector_1k_shift(ctrl
);
695 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
696 BRCMNAND_CS_ACC_CONTROL
);
702 tmp
= nand_readreg(ctrl
, acc_control_offs
);
703 tmp
&= ~(1 << shift
);
704 tmp
|= (!!val
) << shift
;
705 nand_writereg(ctrl
, acc_control_offs
, tmp
);
708 /***********************************************************************
710 ***********************************************************************/
713 CS_SELECT_NAND_WP
= BIT(29),
714 CS_SELECT_AUTO_DEVICE_ID_CFG
= BIT(30),
717 static inline void brcmnand_set_wp(struct brcmnand_controller
*ctrl
, bool en
)
719 u32 val
= en
? CS_SELECT_NAND_WP
: 0;
721 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_SELECT
, CS_SELECT_NAND_WP
, 0, val
);
724 /***********************************************************************
726 ***********************************************************************/
729 FLASH_DMA_REVISION
= 0x00,
730 FLASH_DMA_FIRST_DESC
= 0x04,
731 FLASH_DMA_FIRST_DESC_EXT
= 0x08,
732 FLASH_DMA_CTRL
= 0x0c,
733 FLASH_DMA_MODE
= 0x10,
734 FLASH_DMA_STATUS
= 0x14,
735 FLASH_DMA_INTERRUPT_DESC
= 0x18,
736 FLASH_DMA_INTERRUPT_DESC_EXT
= 0x1c,
737 FLASH_DMA_ERROR_STATUS
= 0x20,
738 FLASH_DMA_CURRENT_DESC
= 0x24,
739 FLASH_DMA_CURRENT_DESC_EXT
= 0x28,
742 static inline bool has_flash_dma(struct brcmnand_controller
*ctrl
)
744 return ctrl
->flash_dma_base
;
747 static inline bool flash_dma_buf_ok(const void *buf
)
749 return buf
&& !is_vmalloc_addr(buf
) &&
750 likely(IS_ALIGNED((uintptr_t)buf
, 4));
753 static inline void flash_dma_writel(struct brcmnand_controller
*ctrl
, u8 offs
,
756 brcmnand_writel(val
, ctrl
->flash_dma_base
+ offs
);
759 static inline u32
flash_dma_readl(struct brcmnand_controller
*ctrl
, u8 offs
)
761 return brcmnand_readl(ctrl
->flash_dma_base
+ offs
);
764 /* Low-level operation types: command, address, write, or read */
765 enum brcmnand_llop_type
{
772 /***********************************************************************
773 * Internal support functions
774 ***********************************************************************/
776 static inline bool is_hamming_ecc(struct brcmnand_cfg
*cfg
)
778 return cfg
->sector_size_1k
== 0 && cfg
->spare_area_size
== 16 &&
779 cfg
->ecc_level
== 15;
783 * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
784 * the layout/configuration.
785 * Returns -ERRCODE on failure.
787 static int brcmnand_hamming_ooblayout_ecc(struct mtd_info
*mtd
, int section
,
788 struct mtd_oob_region
*oobregion
)
790 struct nand_chip
*chip
= mtd_to_nand(mtd
);
791 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
792 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
793 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
794 int sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
796 if (section
>= sectors
)
799 oobregion
->offset
= (section
* sas
) + 6;
800 oobregion
->length
= 3;
805 static int brcmnand_hamming_ooblayout_free(struct mtd_info
*mtd
, int section
,
806 struct mtd_oob_region
*oobregion
)
808 struct nand_chip
*chip
= mtd_to_nand(mtd
);
809 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
810 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
811 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
812 int sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
814 if (section
>= sectors
* 2)
817 oobregion
->offset
= (section
/ 2) * sas
;
820 oobregion
->offset
+= 9;
821 oobregion
->length
= 7;
823 oobregion
->length
= 6;
825 /* First sector of each page may have BBI */
828 * Small-page NAND use byte 6 for BBI while large-page
831 if (cfg
->page_size
> 512)
840 static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops
= {
841 .ecc
= brcmnand_hamming_ooblayout_ecc
,
842 .free
= brcmnand_hamming_ooblayout_free
,
845 static int brcmnand_bch_ooblayout_ecc(struct mtd_info
*mtd
, int section
,
846 struct mtd_oob_region
*oobregion
)
848 struct nand_chip
*chip
= mtd_to_nand(mtd
);
849 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
850 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
851 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
852 int sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
854 if (section
>= sectors
)
857 oobregion
->offset
= (section
* (sas
+ 1)) - chip
->ecc
.bytes
;
858 oobregion
->length
= chip
->ecc
.bytes
;
863 static int brcmnand_bch_ooblayout_free_lp(struct mtd_info
*mtd
, int section
,
864 struct mtd_oob_region
*oobregion
)
866 struct nand_chip
*chip
= mtd_to_nand(mtd
);
867 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
868 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
869 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
870 int sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
872 if (section
>= sectors
)
875 if (sas
<= chip
->ecc
.bytes
)
878 oobregion
->offset
= section
* sas
;
879 oobregion
->length
= sas
- chip
->ecc
.bytes
;
889 static int brcmnand_bch_ooblayout_free_sp(struct mtd_info
*mtd
, int section
,
890 struct mtd_oob_region
*oobregion
)
892 struct nand_chip
*chip
= mtd_to_nand(mtd
);
893 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
894 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
895 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
897 if (section
> 1 || sas
- chip
->ecc
.bytes
< 6 ||
898 (section
&& sas
- chip
->ecc
.bytes
== 6))
902 oobregion
->offset
= 0;
903 oobregion
->length
= 5;
905 oobregion
->offset
= 6;
906 oobregion
->length
= sas
- chip
->ecc
.bytes
- 6;
912 static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops
= {
913 .ecc
= brcmnand_bch_ooblayout_ecc
,
914 .free
= brcmnand_bch_ooblayout_free_lp
,
917 static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops
= {
918 .ecc
= brcmnand_bch_ooblayout_ecc
,
919 .free
= brcmnand_bch_ooblayout_free_sp
,
922 static int brcmstb_choose_ecc_layout(struct brcmnand_host
*host
)
924 struct brcmnand_cfg
*p
= &host
->hwcfg
;
925 struct mtd_info
*mtd
= nand_to_mtd(&host
->chip
);
926 struct nand_ecc_ctrl
*ecc
= &host
->chip
.ecc
;
927 unsigned int ecc_level
= p
->ecc_level
;
928 int sas
= p
->spare_area_size
<< p
->sector_size_1k
;
929 int sectors
= p
->page_size
/ (512 << p
->sector_size_1k
);
931 if (p
->sector_size_1k
)
934 if (is_hamming_ecc(p
)) {
935 ecc
->bytes
= 3 * sectors
;
936 mtd_set_ooblayout(mtd
, &brcmnand_hamming_ooblayout_ops
);
941 * CONTROLLER_VERSION:
942 * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
943 * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
944 * But we will just be conservative.
946 ecc
->bytes
= DIV_ROUND_UP(ecc_level
* 14, 8);
947 if (p
->page_size
== 512)
948 mtd_set_ooblayout(mtd
, &brcmnand_bch_sp_ooblayout_ops
);
950 mtd_set_ooblayout(mtd
, &brcmnand_bch_lp_ooblayout_ops
);
952 if (ecc
->bytes
>= sas
) {
953 dev_err(&host
->pdev
->dev
,
954 "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
962 static void brcmnand_wp(struct mtd_info
*mtd
, int wp
)
964 struct nand_chip
*chip
= mtd_to_nand(mtd
);
965 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
966 struct brcmnand_controller
*ctrl
= host
->ctrl
;
968 if ((ctrl
->features
& BRCMNAND_HAS_WP
) && wp_on
== 1) {
969 static int old_wp
= -1;
972 dev_dbg(ctrl
->dev
, "WP %s\n", wp
? "on" : "off");
975 brcmnand_set_wp(ctrl
, wp
);
979 /* Helper functions for reading and writing OOB registers */
980 static inline u8
oob_reg_read(struct brcmnand_controller
*ctrl
, u32 offs
)
982 u16 offset0
, offset10
, reg_offs
;
984 offset0
= ctrl
->reg_offsets
[BRCMNAND_OOB_READ_BASE
];
985 offset10
= ctrl
->reg_offsets
[BRCMNAND_OOB_READ_10_BASE
];
987 if (offs
>= ctrl
->max_oob
)
990 if (offs
>= 16 && offset10
)
991 reg_offs
= offset10
+ ((offs
- 0x10) & ~0x03);
993 reg_offs
= offset0
+ (offs
& ~0x03);
995 return nand_readreg(ctrl
, reg_offs
) >> (24 - ((offs
& 0x03) << 3));
998 static inline void oob_reg_write(struct brcmnand_controller
*ctrl
, u32 offs
,
1001 u16 offset0
, offset10
, reg_offs
;
1003 offset0
= ctrl
->reg_offsets
[BRCMNAND_OOB_WRITE_BASE
];
1004 offset10
= ctrl
->reg_offsets
[BRCMNAND_OOB_WRITE_10_BASE
];
1006 if (offs
>= ctrl
->max_oob
)
1009 if (offs
>= 16 && offset10
)
1010 reg_offs
= offset10
+ ((offs
- 0x10) & ~0x03);
1012 reg_offs
= offset0
+ (offs
& ~0x03);
1014 nand_writereg(ctrl
, reg_offs
, data
);
1018 * read_oob_from_regs - read data from OOB registers
1019 * @ctrl: NAND controller
1020 * @i: sub-page sector index
1021 * @oob: buffer to read to
1022 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
1023 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
1025 static int read_oob_from_regs(struct brcmnand_controller
*ctrl
, int i
, u8
*oob
,
1026 int sas
, int sector_1k
)
1028 int tbytes
= sas
<< sector_1k
;
1031 /* Adjust OOB values for 1K sector size */
1032 if (sector_1k
&& (i
& 0x01))
1033 tbytes
= max(0, tbytes
- (int)ctrl
->max_oob
);
1034 tbytes
= min_t(int, tbytes
, ctrl
->max_oob
);
1036 for (j
= 0; j
< tbytes
; j
++)
1037 oob
[j
] = oob_reg_read(ctrl
, j
);
1042 * write_oob_to_regs - write data to OOB registers
1043 * @i: sub-page sector index
1044 * @oob: buffer to write from
1045 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
1046 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
1048 static int write_oob_to_regs(struct brcmnand_controller
*ctrl
, int i
,
1049 const u8
*oob
, int sas
, int sector_1k
)
1051 int tbytes
= sas
<< sector_1k
;
1054 /* Adjust OOB values for 1K sector size */
1055 if (sector_1k
&& (i
& 0x01))
1056 tbytes
= max(0, tbytes
- (int)ctrl
->max_oob
);
1057 tbytes
= min_t(int, tbytes
, ctrl
->max_oob
);
1059 for (j
= 0; j
< tbytes
; j
+= 4)
1060 oob_reg_write(ctrl
, j
,
1061 (oob
[j
+ 0] << 24) |
1062 (oob
[j
+ 1] << 16) |
1068 static irqreturn_t
brcmnand_ctlrdy_irq(int irq
, void *data
)
1070 struct brcmnand_controller
*ctrl
= data
;
1072 /* Discard all NAND_CTLRDY interrupts during DMA */
1073 if (ctrl
->dma_pending
)
1076 complete(&ctrl
->done
);
1080 /* Handle SoC-specific interrupt hardware */
1081 static irqreturn_t
brcmnand_irq(int irq
, void *data
)
1083 struct brcmnand_controller
*ctrl
= data
;
1085 if (ctrl
->soc
->ctlrdy_ack(ctrl
->soc
))
1086 return brcmnand_ctlrdy_irq(irq
, data
);
1091 static irqreturn_t
brcmnand_dma_irq(int irq
, void *data
)
1093 struct brcmnand_controller
*ctrl
= data
;
1095 complete(&ctrl
->dma_done
);
1100 static void brcmnand_send_cmd(struct brcmnand_host
*host
, int cmd
)
1102 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1105 dev_dbg(ctrl
->dev
, "send native cmd %d addr_lo 0x%x\n", cmd
,
1106 brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
));
1107 BUG_ON(ctrl
->cmd_pending
!= 0);
1108 ctrl
->cmd_pending
= cmd
;
1110 intfc
= brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
);
1111 BUG_ON(!(intfc
& INTFC_CTLR_READY
));
1113 mb(); /* flush previous writes */
1114 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_START
,
1115 cmd
<< brcmnand_cmd_shift(ctrl
));
1118 /***********************************************************************
1119 * NAND MTD API: read/program/erase
1120 ***********************************************************************/
1122 static void brcmnand_cmd_ctrl(struct mtd_info
*mtd
, int dat
,
1125 /* intentionally left blank */
1128 static int brcmnand_waitfunc(struct mtd_info
*mtd
, struct nand_chip
*this)
1130 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1131 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1132 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1133 unsigned long timeo
= msecs_to_jiffies(100);
1135 dev_dbg(ctrl
->dev
, "wait on native cmd %d\n", ctrl
->cmd_pending
);
1136 if (ctrl
->cmd_pending
&&
1137 wait_for_completion_timeout(&ctrl
->done
, timeo
) <= 0) {
1138 u32 cmd
= brcmnand_read_reg(ctrl
, BRCMNAND_CMD_START
)
1139 >> brcmnand_cmd_shift(ctrl
);
1141 dev_err_ratelimited(ctrl
->dev
,
1142 "timeout waiting for command %#02x\n", cmd
);
1143 dev_err_ratelimited(ctrl
->dev
, "intfc status %08x\n",
1144 brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
));
1146 ctrl
->cmd_pending
= 0;
1147 return brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
) &
1156 LLOP_RETURN_IDLE
= BIT(31),
1158 LLOP_DATA_MASK
= GENMASK(15, 0),
1161 static int brcmnand_low_level_op(struct brcmnand_host
*host
,
1162 enum brcmnand_llop_type type
, u32 data
,
1165 struct mtd_info
*mtd
= nand_to_mtd(&host
->chip
);
1166 struct nand_chip
*chip
= &host
->chip
;
1167 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1170 tmp
= data
& LLOP_DATA_MASK
;
1173 tmp
|= LLOP_WE
| LLOP_CLE
;
1177 tmp
|= LLOP_WE
| LLOP_ALE
;
1190 tmp
|= LLOP_RETURN_IDLE
;
1192 dev_dbg(ctrl
->dev
, "ll_op cmd %#x\n", tmp
);
1194 brcmnand_write_reg(ctrl
, BRCMNAND_LL_OP
, tmp
);
1195 (void)brcmnand_read_reg(ctrl
, BRCMNAND_LL_OP
);
1197 brcmnand_send_cmd(host
, CMD_LOW_LEVEL_OP
);
1198 return brcmnand_waitfunc(mtd
, chip
);
/*
 * nand_chip->cmdfunc implementation: translate generic NAND_CMD_* opcodes
 * into native controller commands, latch the target address, and run the
 * command to completion.  PARAM/RNDOUT results are captured into
 * ctrl->flash_cache for later byte-wise consumption by brcmnand_read_byte().
 */
static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
			     int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	u64 addr = (u64)page_addr << chip->page_shift;
	int native_cmd = 0;

	/* READID/PARAM/RNDOUT address by column, not by page */
	if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
			command == NAND_CMD_RNDOUT)
		addr = (u64)column;
	/* Avoid propagating a negative, don't-care address */
	else if (page_addr < 0)
		addr = 0;

	dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
		(unsigned long long)addr);

	/* Remember context for the byte-at-a-time read path */
	host->last_cmd = command;
	host->last_byte = 0;
	host->last_addr = addr;

	switch (command) {
	case NAND_CMD_RESET:
		native_cmd = CMD_FLASH_RESET;
		break;
	case NAND_CMD_STATUS:
		native_cmd = CMD_STATUS_READ;
		break;
	case NAND_CMD_READID:
		native_cmd = CMD_DEVICE_ID_READ;
		break;
	case NAND_CMD_READOOB:
		native_cmd = CMD_SPARE_AREA_READ;
		break;
	case NAND_CMD_ERASE1:
		native_cmd = CMD_BLOCK_ERASE;
		/* Drop write protection for the erase; restored below */
		brcmnand_wp(mtd, 0);
		break;
	case NAND_CMD_PARAM:
		native_cmd = CMD_PARAMETER_READ;
		break;
	case NAND_CMD_SET_FEATURES:
	case NAND_CMD_GET_FEATURES:
		/* Features are driven via low-level CLE/ALE cycles */
		brcmnand_low_level_op(host, LL_OP_CMD, command, false);
		brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
		break;
	case NAND_CMD_RNDOUT:
		native_cmd = CMD_PARAMETER_CHANGE_COL;
		/* Align to flash-cache granularity */
		addr &= ~((u64)(FC_BYTES - 1));
		/*
		 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
		 * NB: hwcfg.sector_size_1k may not be initialized yet
		 */
		if (brcmnand_get_sector_size_1k(host)) {
			host->hwcfg.sector_size_1k =
				brcmnand_get_sector_size_1k(host);
			brcmnand_set_sector_size_1k(host, 0);
		}
		break;
	}

	if (!native_cmd)
		return;

	/* Latch the full (CS | extended | low) address before the command */
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
		(host->cs << 16) | ((addr >> 32) & 0xffff));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);

	brcmnand_send_cmd(host, native_cmd);
	brcmnand_waitfunc(mtd, chip);

	if (native_cmd == CMD_PARAMETER_READ ||
			native_cmd == CMD_PARAMETER_CHANGE_COL) {
		/* Copy flash cache word-wise */
		u32 *flash_cache = (u32 *)ctrl->flash_cache;
		int i;

		brcmnand_soc_data_bus_prepare(ctrl->soc);

		/*
		 * Must cache the FLASH_CACHE now, since changes in
		 * SECTOR_SIZE_1K may invalidate it
		 */
		for (i = 0; i < FC_WORDS; i++)
			/*
			 * Flash cache is big endian for parameter pages, at
			 * least on STB SoCs
			 */
			flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));

		brcmnand_soc_data_bus_unprepare(ctrl->soc);

		/* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
		if (host->hwcfg.sector_size_1k)
			brcmnand_set_sector_size_1k(host,
						    host->hwcfg.sector_size_1k);
	}

	/* Re-enable protection is necessary only after erase */
	if (command == NAND_CMD_ERASE1)
		brcmnand_wp(mtd, 1);
}
/*
 * nand_chip->read_byte implementation: return the next result byte for the
 * most recent command issued via brcmnand_cmdfunc().  The position within
 * the result stream is tracked in host->last_byte and advanced on each call.
 */
static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	uint8_t ret = 0;
	int addr, offs;

	switch (host->last_cmd) {
	case NAND_CMD_READID:
		/* Bytes 0-3 come from ID, bytes 4-7 from ID_EXT (MSB first) */
		if (host->last_byte < 4)
			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
				(24 - (host->last_byte << 3));
		else if (host->last_byte < 8)
			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
				(56 - (host->last_byte << 3));
		break;

	case NAND_CMD_READOOB:
		ret = oob_reg_read(ctrl, host->last_byte);
		break;

	case NAND_CMD_STATUS:
		ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
					INTFC_FLASH_STATUS;
		if (wp_on) /* hide WP status */
			ret |= NAND_STATUS_WP;
		break;

	case NAND_CMD_PARAM:
	case NAND_CMD_RNDOUT:
		addr = host->last_addr + host->last_byte;
		offs = addr & (FC_BYTES - 1);

		/* At FC_BYTES boundary, switch to next column */
		if (host->last_byte > 0 && offs == 0)
			chip->cmdfunc(mtd, NAND_CMD_RNDOUT, addr, -1);

		/* Serve the byte from the cached flash-cache copy */
		ret = ctrl->flash_cache[offs];
		break;

	case NAND_CMD_GET_FEATURES:
		if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
			ret = 0;
		} else {
			/* Release the bus (RETURN_IDLE) on the final byte */
			bool last = host->last_byte ==
				ONFI_SUBFEATURE_PARAM_LEN - 1;
			brcmnand_low_level_op(host, LL_OP_RD, 0, last);
			ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
		}
	}

	dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
	host->last_byte++;

	return ret;
}
1365 static void brcmnand_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
1369 for (i
= 0; i
< len
; i
++, buf
++)
1370 *buf
= brcmnand_read_byte(mtd
);
/*
 * nand_chip->write_buf implementation.  Only used for SET_FEATURES, whose
 * parameter bytes are driven one at a time as low-level write cycles; any
 * other last_cmd reaching this path is a driver bug.
 */
static void brcmnand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
			       int len)
{
	int i;
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);

	switch (host->last_cmd) {
	case NAND_CMD_SET_FEATURES:
		for (i = 0; i < len; i++)
			/* Assert RETURN_IDLE on the final byte */
			brcmnand_low_level_op(host, LL_OP_WR, buf[i],
					      (i + 1) == len);
		break;
	default:
		BUG();
		break;
	}
}
/*
 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
 * following ahead of time:
 * - Is this descriptor the beginning or end of a linked list?
 * - What is the (DMA) address of the next descriptor in the linked list?
 */
static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
				  struct brcm_nand_dma_desc *desc, u64 addr,
				  dma_addr_t buf, u32 len, u8 dma_cmd,
				  bool begin, bool end,
				  dma_addr_t next_desc)
{
	memset(desc, 0, sizeof(*desc));
	/* Descriptors are written in native byte order (wordwise) */
	desc->next_desc = lower_32_bits(next_desc);
	desc->next_desc_ext = upper_32_bits(next_desc);
	/* cmd_irq: command in bits 31:24; IRQ/STOP on tail; head/tail flags */
	desc->cmd_irq = (dma_cmd << 24) |
		(end ? (0x03 << 8) : 0) | /* IRQ | STOP */
		(!!begin) | ((!!end) << 1); /* head, tail */
#ifdef CONFIG_CPU_BIG_ENDIAN
	/* Byte-swap flag for big-endian CPUs */
	desc->cmd_irq |= 0x01 << 12;
#endif
	desc->dram_addr = lower_32_bits(buf);
	desc->dram_addr_ext = upper_32_bits(buf);
	desc->tfr_len = len;
	desc->total_len = len;
	desc->flash_addr = lower_32_bits(addr);
	desc->flash_addr_ext = upper_32_bits(addr);
	desc->cs = host->cs;
	/* Mark the status word valid so HW error bits can be checked later */
	desc->status_valid = 0x01;
	return 0;
}
/*
 * Kick the FLASH_DMA engine, with a given DMA descriptor
 */
static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned long timeo = msecs_to_jiffies(100);

	/* Posted-write flush via readback after each descriptor pointer */
	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);

	/* Start FLASH_DMA engine */
	ctrl->dma_pending = true;
	mb(); /* flush previous writes */
	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */

	/* dma_done is completed by the FLASH_DMA interrupt handler */
	if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
		dev_err(ctrl->dev,
			"timeout waiting for DMA; status %#x, error status %#x\n",
			flash_dma_readl(ctrl, FLASH_DMA_STATUS),
			flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
	}
	ctrl->dma_pending = false;
	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
}
/*
 * One-shot DMA transfer of @len bytes between @buf and flash offset @addr.
 * Maps the buffer, runs a single head+tail descriptor, and translates the
 * descriptor's HW status into -EBADMSG (uncorrectable) / -EUCLEAN
 * (corrected bitflips) / 0.
 */
static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
			      u32 len, u8 dma_cmd)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	dma_addr_t buf_pa;
	/* Direction follows the command: PAGE_READ is device->memory */
	int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
	if (dma_mapping_error(ctrl->dev, buf_pa)) {
		dev_err(ctrl->dev, "unable to map buffer for DMA\n");
		return -ENOMEM;
	}

	/* Single-descriptor list: begin == end == true, no next_desc */
	brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
			       dma_cmd, true, true, 0);

	brcmnand_dma_run(host, ctrl->dma_pa);

	dma_unmap_single(ctrl->dev, buf_pa, len, dir);

	if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
		return -EBADMSG;
	else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
		return -EUCLEAN;

	return 0;
}
1482 * Assumes proper CS is already set
/*
 * PIO read of @trans flash-cache units starting at @addr, copying data into
 * @buf and/or OOB bytes into @oob.  On ECC trouble, *err_addr receives the
 * failing flash address and the return value is -EBADMSG (uncorrectable,
 * takes precedence) or -EUCLEAN (corrected).
 *
 * Assumes proper CS is already set
 */
static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
				u64 addr, unsigned int trans, u32 *buf,
				u8 *oob, u64 *err_addr)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	int i, j, ret = 0;

	/* Clear error addresses */
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);

	/* Latch CS and the high address bits once for the whole transfer */
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
			(host->cs << 16) | ((addr >> 32) & 0xffff));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);

	for (i = 0; i < trans; i++, addr += FC_BYTES) {
		brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
				   lower_32_bits(addr));
		(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
		/* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
		brcmnand_send_cmd(host, CMD_PAGE_READ);
		brcmnand_waitfunc(mtd, chip);

		if (likely(buf)) {
			brcmnand_soc_data_bus_prepare(ctrl->soc);

			for (j = 0; j < FC_WORDS; j++, buf++)
				*buf = brcmnand_read_fc(ctrl, j);

			brcmnand_soc_data_bus_unprepare(ctrl->soc);
		}

		if (oob)
			oob += read_oob_from_regs(ctrl, i, oob,
					mtd->oobsize / trans,
					host->hwcfg.sector_size_1k);

		/* Record the first error seen; uncorrectable wins over
		 * correctable because it is checked first. */
		if (!ret) {
			*err_addr = brcmnand_read_reg(ctrl,
					BRCMNAND_UNCORR_ADDR) |
				((u64)(brcmnand_read_reg(ctrl,
						BRCMNAND_UNCORR_EXT_ADDR)
					& 0xffff) << 32);
			if (*err_addr)
				ret = -EBADMSG;
		}

		if (!ret) {
			*err_addr = brcmnand_read_reg(ctrl,
					BRCMNAND_CORR_ADDR) |
				((u64)(brcmnand_read_reg(ctrl,
						BRCMNAND_CORR_EXT_ADDR)
					& 0xffff) << 32);
			if (*err_addr)
				ret = -EUCLEAN;
		}
	}

	return ret;
}
/*
 * Common read path for page and OOB reads.  Uses FLASH_DMA when available
 * and the buffer qualifies (no OOB requested, DMA-able address); falls back
 * to PIO otherwise.  Translates controller ECC results into the MTD
 * convention: 0 on uncorrectable errors (after bumping ecc_stats.failed),
 * or the max-bitflip count for corrected errors.
 */
static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
			 u64 addr, unsigned int trans, u32 *buf, u8 *oob)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	u64 err_addr = 0;
	int err;

	dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);

	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);

	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
		err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
					 CMD_PAGE_READ);
		if (err) {
			if (mtd_is_bitflip_or_eccerr(err))
				/* DMA reports no per-sector address; use the
				 * transfer start address */
				err_addr = addr;
			else
				return -EIO;
		}
	} else {
		/* Poison OOB so stale register bytes are detectable */
		if (oob)
			memset(oob, 0x99, mtd->oobsize);

		err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
					   oob, &err_addr);
	}

	if (mtd_is_eccerr(err)) {
		dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
			(unsigned long long)err_addr);
		mtd->ecc_stats.failed++;
		/* NAND layer expects zero on ECC errors */
		return 0;
	}

	if (mtd_is_bitflip(err)) {
		unsigned int corrected = brcmnand_count_corrected(ctrl);

		dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
			(unsigned long long)err_addr);
		mtd->ecc_stats.corrected += corrected;
		/* Always exceed the software-imposed threshold */
		return max(mtd->bitflip_threshold, corrected);
	}

	return 0;
}
1598 static int brcmnand_read_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1599 uint8_t *buf
, int oob_required
, int page
)
1601 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1602 u8
*oob
= oob_required
? (u8
*)chip
->oob_poi
: NULL
;
1604 return brcmnand_read(mtd
, chip
, host
->last_addr
,
1605 mtd
->writesize
>> FC_SHIFT
, (u32
*)buf
, oob
);
/*
 * ecc.read_page_raw hook: same as brcmnand_read_page() but with hardware
 * ECC disabled around the transfer, so raw page + OOB bytes are returned.
 */
static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				  uint8_t *buf, int oob_required, int page)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
	int ret;

	/* ECC must be off for the read and restored afterwards */
	brcmnand_set_ecc_enabled(host, 0);
	ret = brcmnand_read(mtd, chip, host->last_addr,
			mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
	brcmnand_set_ecc_enabled(host, 1);
	return ret;
}
1622 static int brcmnand_read_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1625 return brcmnand_read(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1626 mtd
->writesize
>> FC_SHIFT
,
1627 NULL
, (u8
*)chip
->oob_poi
);
/*
 * ecc.read_oob_raw hook: OOB-only read with hardware ECC disabled around
 * the transfer.
 */
static int brcmnand_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 int page)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);

	brcmnand_set_ecc_enabled(host, 0);
	brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
		      mtd->writesize >> FC_SHIFT,
		      NULL, (u8 *)chip->oob_poi);
	brcmnand_set_ecc_enabled(host, 1);
	return 0;
}
/*
 * Common program path for page and OOB writes.  Clears WP for the duration,
 * pre-fills the OOB registers with 0xff, and uses FLASH_DMA when the buffer
 * qualifies; otherwise programs flash-cache units via PIO.  Returns 0 on
 * success or -EIO on a program failure.
 */
static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
			  u64 addr, const u32 *buf, u8 *oob)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
	int status, ret = 0;

	dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);

	/* Word-align the buffer; the FC is written 32 bits at a time */
	if (unlikely((unsigned long)buf & 0x03)) {
		dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
		buf = (u32 *)((unsigned long)buf & ~0x03);
	}

	brcmnand_wp(mtd, 0);

	/* Default OOB contents to all-0xff (i.e., "don't program") */
	for (i = 0; i < ctrl->max_oob; i += 4)
		oob_reg_write(ctrl, i, 0xffffffff);

	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
		if (brcmnand_dma_trans(host, addr, (u32 *)buf,
					mtd->writesize, CMD_PROGRAM_PAGE))
			ret = -EIO;
		goto out;
	}

	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
			(host->cs << 16) | ((addr >> 32) & 0xffff));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);

	for (i = 0; i < trans; i++, addr += FC_BYTES) {
		/* full address MUST be set before populating FC */
		brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
				   lower_32_bits(addr));
		(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);

		if (buf) {
			brcmnand_soc_data_bus_prepare(ctrl->soc);

			for (j = 0; j < FC_WORDS; j++, buf++)
				brcmnand_write_fc(ctrl, j, *buf);

			brcmnand_soc_data_bus_unprepare(ctrl->soc);
		} else if (oob) {
			/* OOB-only program still needs an all-0xff FC */
			for (j = 0; j < FC_WORDS; j++)
				brcmnand_write_fc(ctrl, j, 0xffffffff);
		}

		if (oob) {
			oob += write_oob_to_regs(ctrl, i, oob,
					mtd->oobsize / trans,
					host->hwcfg.sector_size_1k);
		}

		/* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
		brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
		status = brcmnand_waitfunc(mtd, chip);

		if (status & NAND_STATUS_FAIL) {
			dev_info(ctrl->dev, "program failed at %llx\n",
				(unsigned long long)addr);
			ret = -EIO;
			goto out;
		}
	}
out:
	/* Re-arm write protection regardless of outcome */
	brcmnand_wp(mtd, 1);
	return ret;
}
1714 static int brcmnand_write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1715 const uint8_t *buf
, int oob_required
, int page
)
1717 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1718 void *oob
= oob_required
? chip
->oob_poi
: NULL
;
1720 brcmnand_write(mtd
, chip
, host
->last_addr
, (const u32
*)buf
, oob
);
/*
 * ecc.write_page_raw hook: same as brcmnand_write_page() but with hardware
 * ECC disabled around the program, so ECC bytes are NOT generated.
 */
static int brcmnand_write_page_raw(struct mtd_info *mtd,
				   struct nand_chip *chip, const uint8_t *buf,
				   int oob_required, int page)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	void *oob = oob_required ? chip->oob_poi : NULL;

	/* ECC must be off for the program and restored afterwards */
	brcmnand_set_ecc_enabled(host, 0);
	brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
	brcmnand_set_ecc_enabled(host, 1);
	return 0;
}
1737 static int brcmnand_write_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1740 return brcmnand_write(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1741 NULL
, chip
->oob_poi
);
/*
 * ecc.write_oob_raw hook: OOB-only program with hardware ECC disabled
 * around the transfer.
 */
static int brcmnand_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
				  int page)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	int ret;

	brcmnand_set_ecc_enabled(host, 0);
	ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
			     (u8 *)chip->oob_poi);
	brcmnand_set_ecc_enabled(host, 1);

	return ret;
}
1758 /***********************************************************************
1759 * Per-CS setup (1 NAND device)
1760 ***********************************************************************/
/*
 * Program the per-chip-select CONFIG / CONFIG_EXT / ACC_CONTROL registers
 * from the software configuration in @cfg: block/page/device size encodings,
 * address-cycle counts, bus width, ECC level, spare-area size, 1KiB-sector
 * mode, and the correctable-error reporting threshold.
 * Returns 0 on success or -EINVAL for unsupported geometry.
 */
static int brcmnand_set_cfg(struct brcmnand_host *host,
			    struct brcmnand_cfg *cfg)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	struct nand_chip *chip = &host->chip;
	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
			BRCMNAND_CS_CFG_EXT);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
			BRCMNAND_CS_ACC_CONTROL);
	u8 block_size = 0, page_size = 0, device_size = 0;
	u32 tmp;

	if (ctrl->block_sizes) {
		/* Controller has a fixed table: encode as table index */
		int i, found;

		for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
			if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
				block_size = i;
				found = 1;
			}
		if (!found) {
			dev_warn(ctrl->dev, "invalid block size %u\n",
					cfg->block_size);
			return -EINVAL;
		}
	} else {
		/* Otherwise encode as log2 relative to the minimum size */
		block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
	}

	if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
				cfg->block_size > ctrl->max_block_size)) {
		dev_warn(ctrl->dev, "invalid block size %u\n",
				cfg->block_size);
		block_size = 0;
	}

	if (ctrl->page_sizes) {
		/* Same table-index scheme for page sizes */
		int i, found;

		for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
			if (ctrl->page_sizes[i] == cfg->page_size) {
				page_size = i;
				found = 1;
			}
		if (!found) {
			dev_warn(ctrl->dev, "invalid page size %u\n",
					cfg->page_size);
			return -EINVAL;
		}
	} else {
		page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
	}

	if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
				cfg->page_size > ctrl->max_page_size)) {
		dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
		return -EINVAL;
	}

	if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
		dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
			(unsigned long long)cfg->device_size);
		return -EINVAL;
	}
	device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);

	/* Assemble the CONFIG word; address cycles, bus width, device size */
	tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
		(cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
		(cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
		(!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
		(device_size << CFG_DEVICE_SIZE_SHIFT);
	if (cfg_offs == cfg_ext_offs) {
		/* Older controllers: page/block size live in CONFIG itself */
		tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) |
		       (block_size << CFG_BLK_SIZE_SHIFT);
		nand_writereg(ctrl, cfg_offs, tmp);
	} else {
		/* Newer controllers: separate CONFIG_EXT register */
		nand_writereg(ctrl, cfg_offs, tmp);
		tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
		      (block_size << CFG_EXT_BLK_SIZE_SHIFT);
		nand_writereg(ctrl, cfg_ext_offs, tmp);
	}

	/* Update ECC level and spare-area size in ACC_CONTROL */
	tmp = nand_readreg(ctrl, acc_control_offs);
	tmp &= ~brcmnand_ecc_level_mask(ctrl);
	tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
	tmp &= ~brcmnand_spare_area_mask(ctrl);
	tmp |= cfg->spare_area_size;
	nand_writereg(ctrl, acc_control_offs, tmp);

	brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);

	/* threshold = ceil(BCH-level * 0.75) */
	brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));

	return 0;
}
/*
 * Render a human-readable geometry/ECC summary of @cfg into @buf.
 * Caller must supply a buffer large enough for the full string.
 */
static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg)
{
	buf += sprintf(buf,
		"%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
		(unsigned long long)cfg->device_size >> 20,
		cfg->block_size >> 10,
		cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
		cfg->page_size >= 1024 ? "KiB" : "B",
		cfg->spare_area_size, cfg->device_width);

	/* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
	if (is_hamming_ecc(cfg))
		sprintf(buf, ", Hamming ECC");
	else if (cfg->sector_size_1k)
		/* Register holds strength per 512B; double for 1KiB sectors */
		sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
	else
		sprintf(buf, ", BCH-%u", cfg->ecc_level);
}
1880 * Minimum number of bytes to address a page. Calculated as:
1881 * roundup(log2(size / page-size) / 8)
1883 * NB: the following does not "round up" for non-power-of-2 'size'; but this is
1884 * OK because many other things will break if 'size' is irregular...
1886 static inline int get_blk_adr_bytes(u64 size
, u32 writesize
)
1888 return ALIGN(ilog2(size
) - ilog2(writesize
), 8) >> 3;
/*
 * Derive the hardware configuration (host->hwcfg) from the geometry the
 * NAND core detected plus optional device-tree overrides, validate the ECC
 * parameters, program the controller via brcmnand_set_cfg(), and finalize
 * ACC_CONTROL settings.  Returns 0 on success or -EINVAL.
 */
static int brcmnand_setup_dev(struct brcmnand_host *host)
{
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	struct nand_chip *chip = &host->chip;
	struct brcmnand_controller *ctrl = host->ctrl;
	struct brcmnand_cfg *cfg = &host->hwcfg;
	char msg[128];
	u32 offs, tmp, oob_sector;
	int ret;

	memset(cfg, 0, sizeof(*cfg));

	/* Optional DT override of OOB bytes per 512B sector */
	ret = of_property_read_u32(nand_get_flash_node(chip),
				   "brcm,nand-oob-sector-size",
				   &oob_sector);
	if (ret) {
		/* Use detected size */
		cfg->spare_area_size = mtd->oobsize /
					(mtd->writesize >> FC_SHIFT);
	} else {
		cfg->spare_area_size = oob_sector;
	}
	if (cfg->spare_area_size > ctrl->max_oob)
		cfg->spare_area_size = ctrl->max_oob;
	/*
	 * Set oobsize to be consistent with controller's spare_area_size, as
	 * the rest is inaccessible.
	 */
	mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);

	cfg->device_size = mtd->size;
	cfg->block_size = mtd->erasesize;
	cfg->page_size = mtd->writesize;
	cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
	cfg->col_adr_bytes = 2;
	cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);

	if (chip->ecc.mode != NAND_ECC_HW) {
		dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
			chip->ecc.mode);
		return -EINVAL;
	}

	if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
		if (chip->ecc.strength == 1 && chip->ecc.size == 512)
			/* Default to Hamming for 1-bit ECC, if unspecified */
			chip->ecc.algo = NAND_ECC_HAMMING;
		else
			/* Otherwise, BCH */
			chip->ecc.algo = NAND_ECC_BCH;
	}

	if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 ||
						   chip->ecc.size != 512)) {
		dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
			chip->ecc.strength, chip->ecc.size);
		return -EINVAL;
	}

	switch (chip->ecc.size) {
	case 512:
		if (chip->ecc.algo == NAND_ECC_HAMMING)
			/* ecc_level 15 selects Hamming on this controller */
			cfg->ecc_level = 15;
		else
			cfg->ecc_level = chip->ecc.strength;
		cfg->sector_size_1k = 0;
		break;
	case 1024:
		if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
			dev_err(ctrl->dev, "1KB sectors not supported\n");
			return -EINVAL;
		}
		if (chip->ecc.strength & 0x1) {
			dev_err(ctrl->dev,
				"odd ECC not supported with 1KB sectors\n");
			return -EINVAL;
		}

		/* Register encodes strength per 512B half-sector */
		cfg->ecc_level = chip->ecc.strength >> 1;
		cfg->sector_size_1k = 1;
		break;
	default:
		dev_err(ctrl->dev, "unsupported ECC size: %d\n",
			chip->ecc.size);
		return -EINVAL;
	}

	/* Full address = block address plus column cycles (or 1 for SP) */
	cfg->ful_adr_bytes = cfg->blk_adr_bytes;
	if (mtd->writesize > 512)
		cfg->ful_adr_bytes += cfg->col_adr_bytes;
	else
		cfg->ful_adr_bytes += 1;

	ret = brcmnand_set_cfg(host, cfg);
	if (ret)
		return ret;

	brcmnand_set_ecc_enabled(host, 1);

	brcmnand_print_cfg(msg, cfg);
	dev_info(ctrl->dev, "detected %s\n", msg);

	/* Configure ACC_CONTROL */
	offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
	tmp = nand_readreg(ctrl, offs);
	tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
	tmp &= ~ACC_CONTROL_RD_ERASED;
	tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
	if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
		/*
		 * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
		 * errors
		 */
		if (has_flash_dma(ctrl))
			tmp &= ~ACC_CONTROL_PREFETCH;
		else
			tmp |= ACC_CONTROL_PREFETCH;
	}
	nand_writereg(ctrl, offs, tmp);

	return 0;
}
/*
 * Initialize one chip-select described by device-tree node @dn: wire up the
 * nand_chip/mtd_info callbacks, force 8-bit mode for identification, run
 * nand_scan, apply the controller configuration, and register the MTD.
 * Returns 0 on success or a negative errno.
 */
static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	struct platform_device *pdev = host->pdev;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;
	u16 cfg_offs;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(&pdev->dev, "can't get chip-select\n");
		return -ENXIO;
	}

	mtd = nand_to_mtd(&host->chip);
	chip = &host->chip;

	nand_set_flash_node(chip, dn);
	nand_set_controller_data(chip, host);
	mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
				   host->cs);
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	/* Dummy values: all I/O goes through the callbacks below */
	chip->IO_ADDR_R = (void __iomem *)0xdeadbeef;
	chip->IO_ADDR_W = (void __iomem *)0xdeadbeef;

	chip->cmd_ctrl = brcmnand_cmd_ctrl;
	chip->cmdfunc = brcmnand_cmdfunc;
	chip->waitfunc = brcmnand_waitfunc;
	chip->read_byte = brcmnand_read_byte;
	chip->read_buf = brcmnand_read_buf;
	chip->write_buf = brcmnand_write_buf;

	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.read_page = brcmnand_read_page;
	chip->ecc.write_page = brcmnand_write_page;
	chip->ecc.read_page_raw = brcmnand_read_page_raw;
	chip->ecc.write_page_raw = brcmnand_write_page_raw;
	chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
	chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
	chip->ecc.read_oob = brcmnand_read_oob;
	chip->ecc.write_oob = brcmnand_write_oob;

	chip->controller = &ctrl->controller;

	/*
	 * The bootloader might have configured 16bit mode but
	 * NAND READID command only works in 8bit mode. We force
	 * 8bit mode here to ensure that NAND READID commands works.
	 */
	cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
	nand_writereg(ctrl, cfg_offs,
		      nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENXIO;

	chip->options |= NAND_NO_SUBPAGE_WRITE;
	/*
	 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
	 * to/from, and have nand_base pass us a bounce buffer instead, as
	 * needed.
	 */
	chip->options |= NAND_USE_BOUNCE_BUFFER;

	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		chip->bbt_options |= NAND_BBT_NO_OOB;

	if (brcmnand_setup_dev(host))
		return -ENXIO;

	chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
	/* only use our internal HW threshold */
	mtd->bitflip_threshold = 1;

	ret = brcmstb_choose_ecc_layout(host);
	if (ret)
		return ret;

	if (nand_scan_tail(mtd))
		return -ENXIO;

	return mtd_device_register(mtd, NULL, 0);
}
/*
 * Save (restore == 0) or restore (restore != 0) the per-chip-select
 * CONFIG / CONFIG_EXT / ACC_CONTROL / TIMING registers to/from
 * host->hwcfg, for use across suspend/resume.
 */
static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
					    int restore)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
			BRCMNAND_CS_CFG_EXT);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
			BRCMNAND_CS_ACC_CONTROL);
	u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
	u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);

	if (restore) {
		nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
		/* CONFIG_EXT only exists separately on newer controllers */
		if (cfg_offs != cfg_ext_offs)
			nand_writereg(ctrl, cfg_ext_offs,
				      host->hwcfg.config_ext);
		nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
		nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
		nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
	} else {
		host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
		if (cfg_offs != cfg_ext_offs)
			host->hwcfg.config_ext =
				nand_readreg(ctrl, cfg_ext_offs);
		host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
		host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
		host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
	}
}
/*
 * PM suspend hook: snapshot per-CS configuration and the global
 * CS_SELECT/CS_XOR/CORR_THRESHOLD registers (plus FLASH_DMA mode when
 * present) so brcmnand_resume() can reprogram the controller.
 */
static int brcmnand_suspend(struct device *dev)
{
	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
	struct brcmnand_host *host;

	list_for_each_entry(host, &ctrl->host_list, node)
		brcmnand_save_restore_cs_config(host, 0);

	ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
	ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
	ctrl->corr_stat_threshold =
		brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);

	if (has_flash_dma(ctrl))
		ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);

	return 0;
}
/*
 * PM resume hook: restore the state captured by brcmnand_suspend(),
 * re-enable the controller-ready interrupt, and reset each attached chip.
 */
static int brcmnand_resume(struct device *dev)
{
	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
	struct brcmnand_host *host;

	if (has_flash_dma(ctrl)) {
		flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
	}

	brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
	brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
			ctrl->corr_stat_threshold);

	if (ctrl->soc) {
		/* Clear/re-enable interrupt */
		ctrl->soc->ctlrdy_ack(ctrl->soc);
		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
	}

	list_for_each_entry(host, &ctrl->host_list, node) {
		struct nand_chip *chip = &host->chip;
		struct mtd_info *mtd = nand_to_mtd(chip);

		brcmnand_save_restore_cs_config(host, 1);

		/* Reset the chip, required by some chips after power-up */
		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
	}

	return 0;
}
/* PM callbacks exported for the SoC-specific brcmnand platform drivers. */
const struct dev_pm_ops brcmnand_pm_ops = {
	.suspend		= brcmnand_suspend,
	.resume			= brcmnand_resume,
};
EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
/* Supported controller revisions, matched against the device tree. */
static const struct of_device_id brcmnand_of_match[] = {
	{ .compatible = "brcm,brcmnand-v4.0" },
	{ .compatible = "brcm,brcmnand-v5.0" },
	{ .compatible = "brcm,brcmnand-v6.0" },
	{ .compatible = "brcm,brcmnand-v6.1" },
	{ .compatible = "brcm,brcmnand-v6.2" },
	{ .compatible = "brcm,brcmnand-v7.0" },
	{ .compatible = "brcm,brcmnand-v7.1" },
	{},
};
MODULE_DEVICE_TABLE(of, brcmnand_of_match);
2202 /***********************************************************************
2203 * Platform driver setup (per controller)
2204 ***********************************************************************/
/*
 * Probe one brcmnand controller instance.  @soc carries optional
 * SoC-specific interrupt glue (may be NULL for the generic path).
 * Maps the register ranges, enables the clock, detects the controller
 * revision, sets up FLASH_DMA when its resource is present, requests the
 * controller-ready IRQ, and initializes every "brcm,nandcs" child node.
 * On any failure after the clock is enabled, unwinds via the err label.
 */
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = dev->of_node, *child;
	struct brcmnand_controller *ctrl;
	struct resource *res;
	int ret;

	/* We only support device-tree instantiation */
	if (!dn)
		return -ENODEV;

	if (!of_match_node(brcmnand_of_match, dn))
		return -ENODEV;

	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	dev_set_drvdata(dev, ctrl);
	ctrl->dev = dev;

	init_completion(&ctrl->done);
	init_completion(&ctrl->dma_done);
	spin_lock_init(&ctrl->controller.lock);
	init_waitqueue_head(&ctrl->controller.wq);
	INIT_LIST_HEAD(&ctrl->host_list);

	/* NAND register range */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctrl->nand_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctrl->nand_base))
		return PTR_ERR(ctrl->nand_base);

	/* Enable clock before using NAND registers */
	ctrl->clk = devm_clk_get(dev, "nand");
	if (!IS_ERR(ctrl->clk)) {
		ret = clk_prepare_enable(ctrl->clk);
		if (ret)
			return ret;
	} else {
		/* Clock is optional; only defer on EPROBE_DEFER */
		ret = PTR_ERR(ctrl->clk);
		if (ret == -EPROBE_DEFER)
			return ret;

		ctrl->clk = NULL;
	}

	/* Initialize NAND revision */
	ret = brcmnand_revision_init(ctrl);
	if (ret)
		goto err;

	/*
	 * Most chips have this cache at a fixed offset within 'nand' block.
	 * Some must specify this region separately.
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
	if (res) {
		ctrl->nand_fc = devm_ioremap_resource(dev, res);
		if (IS_ERR(ctrl->nand_fc)) {
			ret = PTR_ERR(ctrl->nand_fc);
			goto err;
		}
	} else {
		ctrl->nand_fc = ctrl->nand_base +
				ctrl->reg_offsets[BRCMNAND_FC_BASE];
	}

	/* FLASH_DMA */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
	if (res) {
		ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(ctrl->flash_dma_base)) {
			ret = PTR_ERR(ctrl->flash_dma_base);
			goto err;
		}

		flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */
		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);

		/* Allocate descriptor(s) */
		ctrl->dma_desc = dmam_alloc_coherent(dev,
						     sizeof(*ctrl->dma_desc),
						     &ctrl->dma_pa, GFP_KERNEL);
		if (!ctrl->dma_desc) {
			ret = -ENOMEM;
			goto err;
		}

		ctrl->dma_irq = platform_get_irq(pdev, 1);
		if ((int)ctrl->dma_irq < 0) {
			dev_err(dev, "missing FLASH_DMA IRQ\n");
			ret = -ENODEV;
			goto err;
		}

		ret = devm_request_irq(dev, ctrl->dma_irq,
				brcmnand_dma_irq, 0, DRV_NAME,
				ctrl);
		if (ret < 0) {
			dev_err(dev, "can't allocate IRQ %d: error %d\n",
					ctrl->dma_irq, ret);
			goto err;
		}

		dev_info(dev, "enabling FLASH_DMA\n");
	}

	/* Disable automatic device ID config, direct addressing */
	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
			 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
	/* Disable XOR addressing */
	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);

	if (ctrl->features & BRCMNAND_HAS_WP) {
		/* Permanently disable write protection */
		if (wp_on == 2)
			brcmnand_set_wp(ctrl, false);
	} else {
		wp_on = 0;
	}

	/* IRQ */
	ctrl->irq = platform_get_irq(pdev, 0);
	if ((int)ctrl->irq < 0) {
		dev_err(dev, "no IRQ defined\n");
		ret = -ENODEV;
		goto err;
	}

	/*
	 * Some SoCs integrate this controller (e.g., its interrupt bits) in
	 * interesting ways
	 */
	if (soc) {
		ctrl->soc = soc;

		ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
				       DRV_NAME, ctrl);

		/* Enable interrupt */
		ctrl->soc->ctlrdy_ack(ctrl->soc);
		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
	} else {
		/* Use standard interrupt infrastructure */
		ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
				       DRV_NAME, ctrl);
	}
	if (ret < 0) {
		dev_err(dev, "can't allocate IRQ %d: error %d\n",
			ctrl->irq, ret);
		goto err;
	}

	for_each_available_child_of_node(dn, child) {
		if (of_device_is_compatible(child, "brcm,nandcs")) {
			struct brcmnand_host *host;

			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
			if (!host) {
				ret = -ENOMEM;
				goto err;
			}
			host->pdev = pdev;
			host->ctrl = ctrl;

			ret = brcmnand_init_cs(host, child);
			if (ret) {
				devm_kfree(dev, host);
				continue; /* Try all chip-selects */
			}

			list_add_tail(&host->node, &ctrl->host_list);
		}
	}

	/* No chip-selects could initialize properly */
	if (list_empty(&ctrl->host_list)) {
		ret = -ENODEV;
		goto err;
	}

	return 0;

err:
	clk_disable_unprepare(ctrl->clk);
	return ret;
}
EXPORT_SYMBOL_GPL(brcmnand_probe);
/*
 * Tear down a controller instance: release every registered MTD/NAND
 * device, stop the clock, and clear the drvdata pointer.  Memory and IRQs
 * are devm-managed and freed automatically.
 */
int brcmnand_remove(struct platform_device *pdev)
{
	struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
	struct brcmnand_host *host;

	list_for_each_entry(host, &ctrl->host_list, node)
		nand_release(nand_to_mtd(&host->chip));

	clk_disable_unprepare(ctrl->clk);

	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}
EXPORT_SYMBOL_GPL(brcmnand_remove);
2415 MODULE_LICENSE("GPL v2");
2416 MODULE_AUTHOR("Kevin Cernekee");
2417 MODULE_AUTHOR("Brian Norris");
2418 MODULE_DESCRIPTION("NAND driver for Broadcom chips");
2419 MODULE_ALIAS("platform:brcmnand");