/*
 * Copyright © 2010-2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
14 #include <linux/clk.h>
15 #include <linux/version.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/platform_device.h>
21 #include <linux/err.h>
22 #include <linux/completion.h>
23 #include <linux/interrupt.h>
24 #include <linux/spinlock.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/ioport.h>
27 #include <linux/bug.h>
28 #include <linux/kernel.h>
29 #include <linux/bitops.h>
31 #include <linux/mtd/mtd.h>
32 #include <linux/mtd/nand.h>
33 #include <linux/mtd/partitions.h>
35 #include <linux/of_platform.h>
36 #include <linux/slab.h>
37 #include <linux/list.h>
38 #include <linux/log2.h>
/*
 * This flag controls if WP stays on between erase/write commands to mitigate
 * flash corruption due to power glitches. Values:
 * 0: NAND_WP is not used or not available
 * 1: NAND_WP is set by default, cleared for erase/write operations
 * 2: NAND_WP is always cleared
 */
50 module_param(wp_on
, int, 0444);
52 /***********************************************************************
54 ***********************************************************************/
56 #define DRV_NAME "brcmnand"
59 #define CMD_PAGE_READ 0x01
60 #define CMD_SPARE_AREA_READ 0x02
61 #define CMD_STATUS_READ 0x03
62 #define CMD_PROGRAM_PAGE 0x04
63 #define CMD_PROGRAM_SPARE_AREA 0x05
64 #define CMD_COPY_BACK 0x06
65 #define CMD_DEVICE_ID_READ 0x07
66 #define CMD_BLOCK_ERASE 0x08
67 #define CMD_FLASH_RESET 0x09
68 #define CMD_BLOCKS_LOCK 0x0a
69 #define CMD_BLOCKS_LOCK_DOWN 0x0b
70 #define CMD_BLOCKS_UNLOCK 0x0c
71 #define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
72 #define CMD_PARAMETER_READ 0x0e
73 #define CMD_PARAMETER_CHANGE_COL 0x0f
74 #define CMD_LOW_LEVEL_OP 0x10
76 struct brcm_nand_dma_desc
{
91 /* Bitfields for brcm_nand_dma_desc::status_valid */
92 #define FLASH_DMA_ECC_ERROR (1 << 8)
93 #define FLASH_DMA_CORR_ERROR (1 << 9)
95 /* 512B flash cache in the NAND controller HW */
98 #define FC_WORDS (FC_BYTES >> 2)
100 #define BRCMNAND_MIN_PAGESIZE 512
101 #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
102 #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
104 /* Controller feature flags */
106 BRCMNAND_HAS_1K_SECTORS
= BIT(0),
107 BRCMNAND_HAS_PREFETCH
= BIT(1),
108 BRCMNAND_HAS_CACHE_MODE
= BIT(2),
109 BRCMNAND_HAS_WP
= BIT(3),
112 struct brcmnand_controller
{
114 struct nand_hw_control controller
;
115 void __iomem
*nand_base
;
116 void __iomem
*nand_fc
; /* flash cache */
117 void __iomem
*flash_dma_base
;
119 unsigned int dma_irq
;
122 /* Some SoCs provide custom interrupt status register(s) */
123 struct brcmnand_soc
*soc
;
125 /* Some SoCs have a gateable clock for the controller */
130 struct completion done
;
131 struct completion dma_done
;
133 /* List of NAND hosts (one for each chip-select) */
134 struct list_head host_list
;
136 struct brcm_nand_dma_desc
*dma_desc
;
139 /* in-memory cache of the FLASH_CACHE, used only for some commands */
140 u8 flash_cache
[FC_BYTES
];
142 /* Controller revision details */
143 const u16
*reg_offsets
;
144 unsigned int reg_spacing
; /* between CS1, CS2, ... regs */
145 const u8
*cs_offsets
; /* within each chip-select */
146 const u8
*cs0_offsets
; /* within CS0, if different */
147 unsigned int max_block_size
;
148 const unsigned int *block_sizes
;
149 unsigned int max_page_size
;
150 const unsigned int *page_sizes
;
151 unsigned int max_oob
;
154 /* for low-power standby/resume only */
155 u32 nand_cs_nand_select
;
156 u32 nand_cs_nand_xor
;
157 u32 corr_stat_threshold
;
161 struct brcmnand_cfg
{
163 unsigned int block_size
;
164 unsigned int page_size
;
165 unsigned int spare_area_size
;
166 unsigned int device_width
;
167 unsigned int col_adr_bytes
;
168 unsigned int blk_adr_bytes
;
169 unsigned int ful_adr_bytes
;
170 unsigned int sector_size_1k
;
171 unsigned int ecc_level
;
172 /* use for low-power standby/resume only */
180 struct brcmnand_host
{
181 struct list_head node
;
183 struct nand_chip chip
;
184 struct platform_device
*pdev
;
187 unsigned int last_cmd
;
188 unsigned int last_byte
;
190 struct brcmnand_cfg hwcfg
;
191 struct brcmnand_controller
*ctrl
;
195 BRCMNAND_CMD_START
= 0,
196 BRCMNAND_CMD_EXT_ADDRESS
,
197 BRCMNAND_CMD_ADDRESS
,
198 BRCMNAND_INTFC_STATUS
,
203 BRCMNAND_CS1_BASE
, /* CS1 regs, if non-contiguous */
204 BRCMNAND_CORR_THRESHOLD
,
205 BRCMNAND_CORR_THRESHOLD_EXT
,
206 BRCMNAND_UNCORR_COUNT
,
208 BRCMNAND_CORR_EXT_ADDR
,
210 BRCMNAND_UNCORR_EXT_ADDR
,
211 BRCMNAND_UNCORR_ADDR
,
216 BRCMNAND_OOB_READ_BASE
,
217 BRCMNAND_OOB_READ_10_BASE
, /* offset 0x10, if non-contiguous */
218 BRCMNAND_OOB_WRITE_BASE
,
219 BRCMNAND_OOB_WRITE_10_BASE
, /* offset 0x10, if non-contiguous */
224 static const u16 brcmnand_regs_v40
[] = {
225 [BRCMNAND_CMD_START
] = 0x04,
226 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
227 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
228 [BRCMNAND_INTFC_STATUS
] = 0x6c,
229 [BRCMNAND_CS_SELECT
] = 0x14,
230 [BRCMNAND_CS_XOR
] = 0x18,
231 [BRCMNAND_LL_OP
] = 0x178,
232 [BRCMNAND_CS0_BASE
] = 0x40,
233 [BRCMNAND_CS1_BASE
] = 0xd0,
234 [BRCMNAND_CORR_THRESHOLD
] = 0x84,
235 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0,
236 [BRCMNAND_UNCORR_COUNT
] = 0,
237 [BRCMNAND_CORR_COUNT
] = 0,
238 [BRCMNAND_CORR_EXT_ADDR
] = 0x70,
239 [BRCMNAND_CORR_ADDR
] = 0x74,
240 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x78,
241 [BRCMNAND_UNCORR_ADDR
] = 0x7c,
242 [BRCMNAND_SEMAPHORE
] = 0x58,
243 [BRCMNAND_ID
] = 0x60,
244 [BRCMNAND_ID_EXT
] = 0x64,
245 [BRCMNAND_LL_RDATA
] = 0x17c,
246 [BRCMNAND_OOB_READ_BASE
] = 0x20,
247 [BRCMNAND_OOB_READ_10_BASE
] = 0x130,
248 [BRCMNAND_OOB_WRITE_BASE
] = 0x30,
249 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
250 [BRCMNAND_FC_BASE
] = 0x200,
254 static const u16 brcmnand_regs_v50
[] = {
255 [BRCMNAND_CMD_START
] = 0x04,
256 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
257 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
258 [BRCMNAND_INTFC_STATUS
] = 0x6c,
259 [BRCMNAND_CS_SELECT
] = 0x14,
260 [BRCMNAND_CS_XOR
] = 0x18,
261 [BRCMNAND_LL_OP
] = 0x178,
262 [BRCMNAND_CS0_BASE
] = 0x40,
263 [BRCMNAND_CS1_BASE
] = 0xd0,
264 [BRCMNAND_CORR_THRESHOLD
] = 0x84,
265 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0,
266 [BRCMNAND_UNCORR_COUNT
] = 0,
267 [BRCMNAND_CORR_COUNT
] = 0,
268 [BRCMNAND_CORR_EXT_ADDR
] = 0x70,
269 [BRCMNAND_CORR_ADDR
] = 0x74,
270 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x78,
271 [BRCMNAND_UNCORR_ADDR
] = 0x7c,
272 [BRCMNAND_SEMAPHORE
] = 0x58,
273 [BRCMNAND_ID
] = 0x60,
274 [BRCMNAND_ID_EXT
] = 0x64,
275 [BRCMNAND_LL_RDATA
] = 0x17c,
276 [BRCMNAND_OOB_READ_BASE
] = 0x20,
277 [BRCMNAND_OOB_READ_10_BASE
] = 0x130,
278 [BRCMNAND_OOB_WRITE_BASE
] = 0x30,
279 [BRCMNAND_OOB_WRITE_10_BASE
] = 0x140,
280 [BRCMNAND_FC_BASE
] = 0x200,
283 /* BRCMNAND v6.0 - v7.1 */
284 static const u16 brcmnand_regs_v60
[] = {
285 [BRCMNAND_CMD_START
] = 0x04,
286 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
287 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
288 [BRCMNAND_INTFC_STATUS
] = 0x14,
289 [BRCMNAND_CS_SELECT
] = 0x18,
290 [BRCMNAND_CS_XOR
] = 0x1c,
291 [BRCMNAND_LL_OP
] = 0x20,
292 [BRCMNAND_CS0_BASE
] = 0x50,
293 [BRCMNAND_CS1_BASE
] = 0,
294 [BRCMNAND_CORR_THRESHOLD
] = 0xc0,
295 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0xc4,
296 [BRCMNAND_UNCORR_COUNT
] = 0xfc,
297 [BRCMNAND_CORR_COUNT
] = 0x100,
298 [BRCMNAND_CORR_EXT_ADDR
] = 0x10c,
299 [BRCMNAND_CORR_ADDR
] = 0x110,
300 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x114,
301 [BRCMNAND_UNCORR_ADDR
] = 0x118,
302 [BRCMNAND_SEMAPHORE
] = 0x150,
303 [BRCMNAND_ID
] = 0x194,
304 [BRCMNAND_ID_EXT
] = 0x198,
305 [BRCMNAND_LL_RDATA
] = 0x19c,
306 [BRCMNAND_OOB_READ_BASE
] = 0x200,
307 [BRCMNAND_OOB_READ_10_BASE
] = 0,
308 [BRCMNAND_OOB_WRITE_BASE
] = 0x280,
309 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
310 [BRCMNAND_FC_BASE
] = 0x400,
314 static const u16 brcmnand_regs_v71
[] = {
315 [BRCMNAND_CMD_START
] = 0x04,
316 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
317 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
318 [BRCMNAND_INTFC_STATUS
] = 0x14,
319 [BRCMNAND_CS_SELECT
] = 0x18,
320 [BRCMNAND_CS_XOR
] = 0x1c,
321 [BRCMNAND_LL_OP
] = 0x20,
322 [BRCMNAND_CS0_BASE
] = 0x50,
323 [BRCMNAND_CS1_BASE
] = 0,
324 [BRCMNAND_CORR_THRESHOLD
] = 0xdc,
325 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0xe0,
326 [BRCMNAND_UNCORR_COUNT
] = 0xfc,
327 [BRCMNAND_CORR_COUNT
] = 0x100,
328 [BRCMNAND_CORR_EXT_ADDR
] = 0x10c,
329 [BRCMNAND_CORR_ADDR
] = 0x110,
330 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x114,
331 [BRCMNAND_UNCORR_ADDR
] = 0x118,
332 [BRCMNAND_SEMAPHORE
] = 0x150,
333 [BRCMNAND_ID
] = 0x194,
334 [BRCMNAND_ID_EXT
] = 0x198,
335 [BRCMNAND_LL_RDATA
] = 0x19c,
336 [BRCMNAND_OOB_READ_BASE
] = 0x200,
337 [BRCMNAND_OOB_READ_10_BASE
] = 0,
338 [BRCMNAND_OOB_WRITE_BASE
] = 0x280,
339 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
340 [BRCMNAND_FC_BASE
] = 0x400,
344 static const u16 brcmnand_regs_v72
[] = {
345 [BRCMNAND_CMD_START
] = 0x04,
346 [BRCMNAND_CMD_EXT_ADDRESS
] = 0x08,
347 [BRCMNAND_CMD_ADDRESS
] = 0x0c,
348 [BRCMNAND_INTFC_STATUS
] = 0x14,
349 [BRCMNAND_CS_SELECT
] = 0x18,
350 [BRCMNAND_CS_XOR
] = 0x1c,
351 [BRCMNAND_LL_OP
] = 0x20,
352 [BRCMNAND_CS0_BASE
] = 0x50,
353 [BRCMNAND_CS1_BASE
] = 0,
354 [BRCMNAND_CORR_THRESHOLD
] = 0xdc,
355 [BRCMNAND_CORR_THRESHOLD_EXT
] = 0xe0,
356 [BRCMNAND_UNCORR_COUNT
] = 0xfc,
357 [BRCMNAND_CORR_COUNT
] = 0x100,
358 [BRCMNAND_CORR_EXT_ADDR
] = 0x10c,
359 [BRCMNAND_CORR_ADDR
] = 0x110,
360 [BRCMNAND_UNCORR_EXT_ADDR
] = 0x114,
361 [BRCMNAND_UNCORR_ADDR
] = 0x118,
362 [BRCMNAND_SEMAPHORE
] = 0x150,
363 [BRCMNAND_ID
] = 0x194,
364 [BRCMNAND_ID_EXT
] = 0x198,
365 [BRCMNAND_LL_RDATA
] = 0x19c,
366 [BRCMNAND_OOB_READ_BASE
] = 0x200,
367 [BRCMNAND_OOB_READ_10_BASE
] = 0,
368 [BRCMNAND_OOB_WRITE_BASE
] = 0x400,
369 [BRCMNAND_OOB_WRITE_10_BASE
] = 0,
370 [BRCMNAND_FC_BASE
] = 0x600,
373 enum brcmnand_cs_reg
{
374 BRCMNAND_CS_CFG_EXT
= 0,
376 BRCMNAND_CS_ACC_CONTROL
,
381 /* Per chip-select offsets for v7.1 */
382 static const u8 brcmnand_cs_offsets_v71
[] = {
383 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
384 [BRCMNAND_CS_CFG_EXT
] = 0x04,
385 [BRCMNAND_CS_CFG
] = 0x08,
386 [BRCMNAND_CS_TIMING1
] = 0x0c,
387 [BRCMNAND_CS_TIMING2
] = 0x10,
390 /* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
391 static const u8 brcmnand_cs_offsets
[] = {
392 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
393 [BRCMNAND_CS_CFG_EXT
] = 0x04,
394 [BRCMNAND_CS_CFG
] = 0x04,
395 [BRCMNAND_CS_TIMING1
] = 0x08,
396 [BRCMNAND_CS_TIMING2
] = 0x0c,
399 /* Per chip-select offset for <= v5.0 on CS0 only */
400 static const u8 brcmnand_cs_offsets_cs0
[] = {
401 [BRCMNAND_CS_ACC_CONTROL
] = 0x00,
402 [BRCMNAND_CS_CFG_EXT
] = 0x08,
403 [BRCMNAND_CS_CFG
] = 0x08,
404 [BRCMNAND_CS_TIMING1
] = 0x10,
405 [BRCMNAND_CS_TIMING2
] = 0x14,
409 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
410 * one config register, but once the bitfields overflowed, newer controllers
411 * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
414 CFG_BLK_ADR_BYTES_SHIFT
= 8,
415 CFG_COL_ADR_BYTES_SHIFT
= 12,
416 CFG_FUL_ADR_BYTES_SHIFT
= 16,
417 CFG_BUS_WIDTH_SHIFT
= 23,
418 CFG_BUS_WIDTH
= BIT(CFG_BUS_WIDTH_SHIFT
),
419 CFG_DEVICE_SIZE_SHIFT
= 24,
421 /* Only for pre-v7.1 (with no CFG_EXT register) */
422 CFG_PAGE_SIZE_SHIFT
= 20,
423 CFG_BLK_SIZE_SHIFT
= 28,
425 /* Only for v7.1+ (with CFG_EXT register) */
426 CFG_EXT_PAGE_SIZE_SHIFT
= 0,
427 CFG_EXT_BLK_SIZE_SHIFT
= 4,
430 /* BRCMNAND_INTFC_STATUS */
432 INTFC_FLASH_STATUS
= GENMASK(7, 0),
434 INTFC_ERASED
= BIT(27),
435 INTFC_OOB_VALID
= BIT(28),
436 INTFC_CACHE_VALID
= BIT(29),
437 INTFC_FLASH_READY
= BIT(30),
438 INTFC_CTLR_READY
= BIT(31),
441 static inline u32
nand_readreg(struct brcmnand_controller
*ctrl
, u32 offs
)
443 return brcmnand_readl(ctrl
->nand_base
+ offs
);
446 static inline void nand_writereg(struct brcmnand_controller
*ctrl
, u32 offs
,
449 brcmnand_writel(val
, ctrl
->nand_base
+ offs
);
452 static int brcmnand_revision_init(struct brcmnand_controller
*ctrl
)
454 static const unsigned int block_sizes_v6
[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
455 static const unsigned int block_sizes_v4
[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
456 static const unsigned int page_sizes
[] = { 512, 2048, 4096, 8192, 0 };
458 ctrl
->nand_version
= nand_readreg(ctrl
, 0) & 0xffff;
460 /* Only support v4.0+? */
461 if (ctrl
->nand_version
< 0x0400) {
462 dev_err(ctrl
->dev
, "version %#x not supported\n",
467 /* Register offsets */
468 if (ctrl
->nand_version
>= 0x0702)
469 ctrl
->reg_offsets
= brcmnand_regs_v72
;
470 else if (ctrl
->nand_version
>= 0x0701)
471 ctrl
->reg_offsets
= brcmnand_regs_v71
;
472 else if (ctrl
->nand_version
>= 0x0600)
473 ctrl
->reg_offsets
= brcmnand_regs_v60
;
474 else if (ctrl
->nand_version
>= 0x0500)
475 ctrl
->reg_offsets
= brcmnand_regs_v50
;
476 else if (ctrl
->nand_version
>= 0x0400)
477 ctrl
->reg_offsets
= brcmnand_regs_v40
;
479 /* Chip-select stride */
480 if (ctrl
->nand_version
>= 0x0701)
481 ctrl
->reg_spacing
= 0x14;
483 ctrl
->reg_spacing
= 0x10;
485 /* Per chip-select registers */
486 if (ctrl
->nand_version
>= 0x0701) {
487 ctrl
->cs_offsets
= brcmnand_cs_offsets_v71
;
489 ctrl
->cs_offsets
= brcmnand_cs_offsets
;
491 /* v5.0 and earlier has a different CS0 offset layout */
492 if (ctrl
->nand_version
<= 0x0500)
493 ctrl
->cs0_offsets
= brcmnand_cs_offsets_cs0
;
496 /* Page / block sizes */
497 if (ctrl
->nand_version
>= 0x0701) {
498 /* >= v7.1 use nice power-of-2 values! */
499 ctrl
->max_page_size
= 16 * 1024;
500 ctrl
->max_block_size
= 2 * 1024 * 1024;
502 ctrl
->page_sizes
= page_sizes
;
503 if (ctrl
->nand_version
>= 0x0600)
504 ctrl
->block_sizes
= block_sizes_v6
;
506 ctrl
->block_sizes
= block_sizes_v4
;
508 if (ctrl
->nand_version
< 0x0400) {
509 ctrl
->max_page_size
= 4096;
510 ctrl
->max_block_size
= 512 * 1024;
514 /* Maximum spare area sector size (per 512B) */
515 if (ctrl
->nand_version
>= 0x0702)
517 else if (ctrl
->nand_version
>= 0x0600)
519 else if (ctrl
->nand_version
>= 0x0500)
524 /* v6.0 and newer (except v6.1) have prefetch support */
525 if (ctrl
->nand_version
>= 0x0600 && ctrl
->nand_version
!= 0x0601)
526 ctrl
->features
|= BRCMNAND_HAS_PREFETCH
;
529 * v6.x has cache mode, but it's implemented differently. Ignore it for
532 if (ctrl
->nand_version
>= 0x0700)
533 ctrl
->features
|= BRCMNAND_HAS_CACHE_MODE
;
535 if (ctrl
->nand_version
>= 0x0500)
536 ctrl
->features
|= BRCMNAND_HAS_1K_SECTORS
;
538 if (ctrl
->nand_version
>= 0x0700)
539 ctrl
->features
|= BRCMNAND_HAS_WP
;
540 else if (of_property_read_bool(ctrl
->dev
->of_node
, "brcm,nand-has-wp"))
541 ctrl
->features
|= BRCMNAND_HAS_WP
;
546 static inline u32
brcmnand_read_reg(struct brcmnand_controller
*ctrl
,
547 enum brcmnand_reg reg
)
549 u16 offs
= ctrl
->reg_offsets
[reg
];
552 return nand_readreg(ctrl
, offs
);
557 static inline void brcmnand_write_reg(struct brcmnand_controller
*ctrl
,
558 enum brcmnand_reg reg
, u32 val
)
560 u16 offs
= ctrl
->reg_offsets
[reg
];
563 nand_writereg(ctrl
, offs
, val
);
566 static inline void brcmnand_rmw_reg(struct brcmnand_controller
*ctrl
,
567 enum brcmnand_reg reg
, u32 mask
, unsigned
570 u32 tmp
= brcmnand_read_reg(ctrl
, reg
);
574 brcmnand_write_reg(ctrl
, reg
, tmp
);
577 static inline u32
brcmnand_read_fc(struct brcmnand_controller
*ctrl
, int word
)
579 return __raw_readl(ctrl
->nand_fc
+ word
* 4);
582 static inline void brcmnand_write_fc(struct brcmnand_controller
*ctrl
,
585 __raw_writel(val
, ctrl
->nand_fc
+ word
* 4);
588 static inline u16
brcmnand_cs_offset(struct brcmnand_controller
*ctrl
, int cs
,
589 enum brcmnand_cs_reg reg
)
591 u16 offs_cs0
= ctrl
->reg_offsets
[BRCMNAND_CS0_BASE
];
592 u16 offs_cs1
= ctrl
->reg_offsets
[BRCMNAND_CS1_BASE
];
595 if (cs
== 0 && ctrl
->cs0_offsets
)
596 cs_offs
= ctrl
->cs0_offsets
[reg
];
598 cs_offs
= ctrl
->cs_offsets
[reg
];
601 return offs_cs1
+ (cs
- 1) * ctrl
->reg_spacing
+ cs_offs
;
603 return offs_cs0
+ cs
* ctrl
->reg_spacing
+ cs_offs
;
606 static inline u32
brcmnand_count_corrected(struct brcmnand_controller
*ctrl
)
608 if (ctrl
->nand_version
< 0x0600)
610 return brcmnand_read_reg(ctrl
, BRCMNAND_CORR_COUNT
);
613 static void brcmnand_wr_corr_thresh(struct brcmnand_host
*host
, u8 val
)
615 struct brcmnand_controller
*ctrl
= host
->ctrl
;
616 unsigned int shift
= 0, bits
;
617 enum brcmnand_reg reg
= BRCMNAND_CORR_THRESHOLD
;
620 if (ctrl
->nand_version
>= 0x0702)
622 else if (ctrl
->nand_version
>= 0x0600)
624 else if (ctrl
->nand_version
>= 0x0500)
629 if (ctrl
->nand_version
>= 0x0702) {
631 reg
= BRCMNAND_CORR_THRESHOLD_EXT
;
632 shift
= (cs
% 4) * bits
;
633 } else if (ctrl
->nand_version
>= 0x0600) {
635 reg
= BRCMNAND_CORR_THRESHOLD_EXT
;
636 shift
= (cs
% 5) * bits
;
638 brcmnand_rmw_reg(ctrl
, reg
, (bits
- 1) << shift
, shift
, val
);
641 static inline int brcmnand_cmd_shift(struct brcmnand_controller
*ctrl
)
643 if (ctrl
->nand_version
< 0x0602)
648 /***********************************************************************
649 * NAND ACC CONTROL bitfield
651 * Some bits have remained constant throughout hardware revision, while
652 * others have shifted around.
653 ***********************************************************************/
655 /* Constant for all versions (where supported) */
657 /* See BRCMNAND_HAS_CACHE_MODE */
658 ACC_CONTROL_CACHE_MODE
= BIT(22),
660 /* See BRCMNAND_HAS_PREFETCH */
661 ACC_CONTROL_PREFETCH
= BIT(23),
663 ACC_CONTROL_PAGE_HIT
= BIT(24),
664 ACC_CONTROL_WR_PREEMPT
= BIT(25),
665 ACC_CONTROL_PARTIAL_PAGE
= BIT(26),
666 ACC_CONTROL_RD_ERASED
= BIT(27),
667 ACC_CONTROL_FAST_PGM_RDIN
= BIT(28),
668 ACC_CONTROL_WR_ECC
= BIT(30),
669 ACC_CONTROL_RD_ECC
= BIT(31),
672 static inline u32
brcmnand_spare_area_mask(struct brcmnand_controller
*ctrl
)
674 if (ctrl
->nand_version
>= 0x0702)
675 return GENMASK(7, 0);
676 else if (ctrl
->nand_version
>= 0x0600)
677 return GENMASK(6, 0);
679 return GENMASK(5, 0);
682 #define NAND_ACC_CONTROL_ECC_SHIFT 16
683 #define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13
685 static inline u32
brcmnand_ecc_level_mask(struct brcmnand_controller
*ctrl
)
687 u32 mask
= (ctrl
->nand_version
>= 0x0600) ? 0x1f : 0x0f;
689 mask
<<= NAND_ACC_CONTROL_ECC_SHIFT
;
691 /* v7.2 includes additional ECC levels */
692 if (ctrl
->nand_version
>= 0x0702)
693 mask
|= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT
;
698 static void brcmnand_set_ecc_enabled(struct brcmnand_host
*host
, int en
)
700 struct brcmnand_controller
*ctrl
= host
->ctrl
;
701 u16 offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_ACC_CONTROL
);
702 u32 acc_control
= nand_readreg(ctrl
, offs
);
703 u32 ecc_flags
= ACC_CONTROL_WR_ECC
| ACC_CONTROL_RD_ECC
;
706 acc_control
|= ecc_flags
; /* enable RD/WR ECC */
707 acc_control
|= host
->hwcfg
.ecc_level
708 << NAND_ACC_CONTROL_ECC_SHIFT
;
710 acc_control
&= ~ecc_flags
; /* disable RD/WR ECC */
711 acc_control
&= ~brcmnand_ecc_level_mask(ctrl
);
714 nand_writereg(ctrl
, offs
, acc_control
);
717 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller
*ctrl
)
719 if (ctrl
->nand_version
>= 0x0702)
721 else if (ctrl
->nand_version
>= 0x0600)
723 else if (ctrl
->nand_version
>= 0x0500)
729 static int brcmnand_get_sector_size_1k(struct brcmnand_host
*host
)
731 struct brcmnand_controller
*ctrl
= host
->ctrl
;
732 int shift
= brcmnand_sector_1k_shift(ctrl
);
733 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
734 BRCMNAND_CS_ACC_CONTROL
);
739 return (nand_readreg(ctrl
, acc_control_offs
) >> shift
) & 0x1;
742 static void brcmnand_set_sector_size_1k(struct brcmnand_host
*host
, int val
)
744 struct brcmnand_controller
*ctrl
= host
->ctrl
;
745 int shift
= brcmnand_sector_1k_shift(ctrl
);
746 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
747 BRCMNAND_CS_ACC_CONTROL
);
753 tmp
= nand_readreg(ctrl
, acc_control_offs
);
754 tmp
&= ~(1 << shift
);
755 tmp
|= (!!val
) << shift
;
756 nand_writereg(ctrl
, acc_control_offs
, tmp
);
759 /***********************************************************************
761 ***********************************************************************/
764 CS_SELECT_NAND_WP
= BIT(29),
765 CS_SELECT_AUTO_DEVICE_ID_CFG
= BIT(30),
768 static inline void brcmnand_set_wp(struct brcmnand_controller
*ctrl
, bool en
)
770 u32 val
= en
? CS_SELECT_NAND_WP
: 0;
772 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_SELECT
, CS_SELECT_NAND_WP
, 0, val
);
775 /***********************************************************************
777 ***********************************************************************/
780 FLASH_DMA_REVISION
= 0x00,
781 FLASH_DMA_FIRST_DESC
= 0x04,
782 FLASH_DMA_FIRST_DESC_EXT
= 0x08,
783 FLASH_DMA_CTRL
= 0x0c,
784 FLASH_DMA_MODE
= 0x10,
785 FLASH_DMA_STATUS
= 0x14,
786 FLASH_DMA_INTERRUPT_DESC
= 0x18,
787 FLASH_DMA_INTERRUPT_DESC_EXT
= 0x1c,
788 FLASH_DMA_ERROR_STATUS
= 0x20,
789 FLASH_DMA_CURRENT_DESC
= 0x24,
790 FLASH_DMA_CURRENT_DESC_EXT
= 0x28,
793 static inline bool has_flash_dma(struct brcmnand_controller
*ctrl
)
795 return ctrl
->flash_dma_base
;
798 static inline bool flash_dma_buf_ok(const void *buf
)
800 return buf
&& !is_vmalloc_addr(buf
) &&
801 likely(IS_ALIGNED((uintptr_t)buf
, 4));
804 static inline void flash_dma_writel(struct brcmnand_controller
*ctrl
, u8 offs
,
807 brcmnand_writel(val
, ctrl
->flash_dma_base
+ offs
);
810 static inline u32
flash_dma_readl(struct brcmnand_controller
*ctrl
, u8 offs
)
812 return brcmnand_readl(ctrl
->flash_dma_base
+ offs
);
815 /* Low-level operation types: command, address, write, or read */
816 enum brcmnand_llop_type
{
823 /***********************************************************************
824 * Internal support functions
825 ***********************************************************************/
827 static inline bool is_hamming_ecc(struct brcmnand_controller
*ctrl
,
828 struct brcmnand_cfg
*cfg
)
830 if (ctrl
->nand_version
<= 0x0701)
831 return cfg
->sector_size_1k
== 0 && cfg
->spare_area_size
== 16 &&
832 cfg
->ecc_level
== 15;
834 return cfg
->sector_size_1k
== 0 && ((cfg
->spare_area_size
== 16 &&
835 cfg
->ecc_level
== 15) ||
836 (cfg
->spare_area_size
== 28 && cfg
->ecc_level
== 16));
840 * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
841 * the layout/configuration.
842 * Returns -ERRCODE on failure.
844 static int brcmnand_hamming_ooblayout_ecc(struct mtd_info
*mtd
, int section
,
845 struct mtd_oob_region
*oobregion
)
847 struct nand_chip
*chip
= mtd_to_nand(mtd
);
848 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
849 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
850 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
851 int sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
853 if (section
>= sectors
)
856 oobregion
->offset
= (section
* sas
) + 6;
857 oobregion
->length
= 3;
862 static int brcmnand_hamming_ooblayout_free(struct mtd_info
*mtd
, int section
,
863 struct mtd_oob_region
*oobregion
)
865 struct nand_chip
*chip
= mtd_to_nand(mtd
);
866 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
867 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
868 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
869 int sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
871 if (section
>= sectors
* 2)
874 oobregion
->offset
= (section
/ 2) * sas
;
877 oobregion
->offset
+= 9;
878 oobregion
->length
= 7;
880 oobregion
->length
= 6;
882 /* First sector of each page may have BBI */
885 * Small-page NAND use byte 6 for BBI while large-page
888 if (cfg
->page_size
> 512)
897 static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops
= {
898 .ecc
= brcmnand_hamming_ooblayout_ecc
,
899 .free
= brcmnand_hamming_ooblayout_free
,
902 static int brcmnand_bch_ooblayout_ecc(struct mtd_info
*mtd
, int section
,
903 struct mtd_oob_region
*oobregion
)
905 struct nand_chip
*chip
= mtd_to_nand(mtd
);
906 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
907 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
908 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
909 int sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
911 if (section
>= sectors
)
914 oobregion
->offset
= (section
* (sas
+ 1)) - chip
->ecc
.bytes
;
915 oobregion
->length
= chip
->ecc
.bytes
;
920 static int brcmnand_bch_ooblayout_free_lp(struct mtd_info
*mtd
, int section
,
921 struct mtd_oob_region
*oobregion
)
923 struct nand_chip
*chip
= mtd_to_nand(mtd
);
924 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
925 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
926 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
927 int sectors
= cfg
->page_size
/ (512 << cfg
->sector_size_1k
);
929 if (section
>= sectors
)
932 if (sas
<= chip
->ecc
.bytes
)
935 oobregion
->offset
= section
* sas
;
936 oobregion
->length
= sas
- chip
->ecc
.bytes
;
946 static int brcmnand_bch_ooblayout_free_sp(struct mtd_info
*mtd
, int section
,
947 struct mtd_oob_region
*oobregion
)
949 struct nand_chip
*chip
= mtd_to_nand(mtd
);
950 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
951 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
952 int sas
= cfg
->spare_area_size
<< cfg
->sector_size_1k
;
954 if (section
> 1 || sas
- chip
->ecc
.bytes
< 6 ||
955 (section
&& sas
- chip
->ecc
.bytes
== 6))
959 oobregion
->offset
= 0;
960 oobregion
->length
= 5;
962 oobregion
->offset
= 6;
963 oobregion
->length
= sas
- chip
->ecc
.bytes
- 6;
969 static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops
= {
970 .ecc
= brcmnand_bch_ooblayout_ecc
,
971 .free
= brcmnand_bch_ooblayout_free_lp
,
974 static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops
= {
975 .ecc
= brcmnand_bch_ooblayout_ecc
,
976 .free
= brcmnand_bch_ooblayout_free_sp
,
979 static int brcmstb_choose_ecc_layout(struct brcmnand_host
*host
)
981 struct brcmnand_cfg
*p
= &host
->hwcfg
;
982 struct mtd_info
*mtd
= nand_to_mtd(&host
->chip
);
983 struct nand_ecc_ctrl
*ecc
= &host
->chip
.ecc
;
984 unsigned int ecc_level
= p
->ecc_level
;
985 int sas
= p
->spare_area_size
<< p
->sector_size_1k
;
986 int sectors
= p
->page_size
/ (512 << p
->sector_size_1k
);
988 if (p
->sector_size_1k
)
991 if (is_hamming_ecc(host
->ctrl
, p
)) {
992 ecc
->bytes
= 3 * sectors
;
993 mtd_set_ooblayout(mtd
, &brcmnand_hamming_ooblayout_ops
);
998 * CONTROLLER_VERSION:
999 * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
1000 * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
1001 * But we will just be conservative.
1003 ecc
->bytes
= DIV_ROUND_UP(ecc_level
* 14, 8);
1004 if (p
->page_size
== 512)
1005 mtd_set_ooblayout(mtd
, &brcmnand_bch_sp_ooblayout_ops
);
1007 mtd_set_ooblayout(mtd
, &brcmnand_bch_lp_ooblayout_ops
);
1009 if (ecc
->bytes
>= sas
) {
1010 dev_err(&host
->pdev
->dev
,
1011 "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
1019 static void brcmnand_wp(struct mtd_info
*mtd
, int wp
)
1021 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1022 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1023 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1025 if ((ctrl
->features
& BRCMNAND_HAS_WP
) && wp_on
== 1) {
1026 static int old_wp
= -1;
1029 dev_dbg(ctrl
->dev
, "WP %s\n", wp
? "on" : "off");
1032 brcmnand_set_wp(ctrl
, wp
);
1036 /* Helper functions for reading and writing OOB registers */
1037 static inline u8
oob_reg_read(struct brcmnand_controller
*ctrl
, u32 offs
)
1039 u16 offset0
, offset10
, reg_offs
;
1041 offset0
= ctrl
->reg_offsets
[BRCMNAND_OOB_READ_BASE
];
1042 offset10
= ctrl
->reg_offsets
[BRCMNAND_OOB_READ_10_BASE
];
1044 if (offs
>= ctrl
->max_oob
)
1047 if (offs
>= 16 && offset10
)
1048 reg_offs
= offset10
+ ((offs
- 0x10) & ~0x03);
1050 reg_offs
= offset0
+ (offs
& ~0x03);
1052 return nand_readreg(ctrl
, reg_offs
) >> (24 - ((offs
& 0x03) << 3));
1055 static inline void oob_reg_write(struct brcmnand_controller
*ctrl
, u32 offs
,
1058 u16 offset0
, offset10
, reg_offs
;
1060 offset0
= ctrl
->reg_offsets
[BRCMNAND_OOB_WRITE_BASE
];
1061 offset10
= ctrl
->reg_offsets
[BRCMNAND_OOB_WRITE_10_BASE
];
1063 if (offs
>= ctrl
->max_oob
)
1066 if (offs
>= 16 && offset10
)
1067 reg_offs
= offset10
+ ((offs
- 0x10) & ~0x03);
1069 reg_offs
= offset0
+ (offs
& ~0x03);
1071 nand_writereg(ctrl
, reg_offs
, data
);
1075 * read_oob_from_regs - read data from OOB registers
1076 * @ctrl: NAND controller
1077 * @i: sub-page sector index
1078 * @oob: buffer to read to
1079 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
1080 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
1082 static int read_oob_from_regs(struct brcmnand_controller
*ctrl
, int i
, u8
*oob
,
1083 int sas
, int sector_1k
)
1085 int tbytes
= sas
<< sector_1k
;
1088 /* Adjust OOB values for 1K sector size */
1089 if (sector_1k
&& (i
& 0x01))
1090 tbytes
= max(0, tbytes
- (int)ctrl
->max_oob
);
1091 tbytes
= min_t(int, tbytes
, ctrl
->max_oob
);
1093 for (j
= 0; j
< tbytes
; j
++)
1094 oob
[j
] = oob_reg_read(ctrl
, j
);
1099 * write_oob_to_regs - write data to OOB registers
1100 * @i: sub-page sector index
1101 * @oob: buffer to write from
1102 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
1103 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
1105 static int write_oob_to_regs(struct brcmnand_controller
*ctrl
, int i
,
1106 const u8
*oob
, int sas
, int sector_1k
)
1108 int tbytes
= sas
<< sector_1k
;
1111 /* Adjust OOB values for 1K sector size */
1112 if (sector_1k
&& (i
& 0x01))
1113 tbytes
= max(0, tbytes
- (int)ctrl
->max_oob
);
1114 tbytes
= min_t(int, tbytes
, ctrl
->max_oob
);
1116 for (j
= 0; j
< tbytes
; j
+= 4)
1117 oob_reg_write(ctrl
, j
,
1118 (oob
[j
+ 0] << 24) |
1119 (oob
[j
+ 1] << 16) |
1125 static irqreturn_t
brcmnand_ctlrdy_irq(int irq
, void *data
)
1127 struct brcmnand_controller
*ctrl
= data
;
1129 /* Discard all NAND_CTLRDY interrupts during DMA */
1130 if (ctrl
->dma_pending
)
1133 complete(&ctrl
->done
);
1137 /* Handle SoC-specific interrupt hardware */
1138 static irqreturn_t
brcmnand_irq(int irq
, void *data
)
1140 struct brcmnand_controller
*ctrl
= data
;
1142 if (ctrl
->soc
->ctlrdy_ack(ctrl
->soc
))
1143 return brcmnand_ctlrdy_irq(irq
, data
);
1148 static irqreturn_t
brcmnand_dma_irq(int irq
, void *data
)
1150 struct brcmnand_controller
*ctrl
= data
;
1152 complete(&ctrl
->dma_done
);
1157 static void brcmnand_send_cmd(struct brcmnand_host
*host
, int cmd
)
1159 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1162 dev_dbg(ctrl
->dev
, "send native cmd %d addr_lo 0x%x\n", cmd
,
1163 brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
));
1164 BUG_ON(ctrl
->cmd_pending
!= 0);
1165 ctrl
->cmd_pending
= cmd
;
1167 intfc
= brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
);
1168 WARN_ON(!(intfc
& INTFC_CTLR_READY
));
1170 mb(); /* flush previous writes */
1171 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_START
,
1172 cmd
<< brcmnand_cmd_shift(ctrl
));
1175 /***********************************************************************
1176 * NAND MTD API: read/program/erase
1177 ***********************************************************************/
/* No raw command/address cycles: the controller issues them itself */
static void brcmnand_cmd_ctrl(struct mtd_info *mtd, int dat,
			      unsigned int ctrl)
{
	/* intentionally left blank */
}
1185 static int brcmnand_waitfunc(struct mtd_info
*mtd
, struct nand_chip
*this)
1187 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1188 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1189 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1190 unsigned long timeo
= msecs_to_jiffies(100);
1192 dev_dbg(ctrl
->dev
, "wait on native cmd %d\n", ctrl
->cmd_pending
);
1193 if (ctrl
->cmd_pending
&&
1194 wait_for_completion_timeout(&ctrl
->done
, timeo
) <= 0) {
1195 u32 cmd
= brcmnand_read_reg(ctrl
, BRCMNAND_CMD_START
)
1196 >> brcmnand_cmd_shift(ctrl
);
1198 dev_err_ratelimited(ctrl
->dev
,
1199 "timeout waiting for command %#02x\n", cmd
);
1200 dev_err_ratelimited(ctrl
->dev
, "intfc status %08x\n",
1201 brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
));
1203 ctrl
->cmd_pending
= 0;
1204 return brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
) &
1213 LLOP_RETURN_IDLE
= BIT(31),
1215 LLOP_DATA_MASK
= GENMASK(15, 0),
1218 static int brcmnand_low_level_op(struct brcmnand_host
*host
,
1219 enum brcmnand_llop_type type
, u32 data
,
1222 struct mtd_info
*mtd
= nand_to_mtd(&host
->chip
);
1223 struct nand_chip
*chip
= &host
->chip
;
1224 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1227 tmp
= data
& LLOP_DATA_MASK
;
1230 tmp
|= LLOP_WE
| LLOP_CLE
;
1234 tmp
|= LLOP_WE
| LLOP_ALE
;
1247 tmp
|= LLOP_RETURN_IDLE
;
1249 dev_dbg(ctrl
->dev
, "ll_op cmd %#x\n", tmp
);
1251 brcmnand_write_reg(ctrl
, BRCMNAND_LL_OP
, tmp
);
1252 (void)brcmnand_read_reg(ctrl
, BRCMNAND_LL_OP
);
1254 brcmnand_send_cmd(host
, CMD_LOW_LEVEL_OP
);
1255 return brcmnand_waitfunc(mtd
, chip
);
1258 static void brcmnand_cmdfunc(struct mtd_info
*mtd
, unsigned command
,
1259 int column
, int page_addr
)
1261 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1262 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1263 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1264 u64 addr
= (u64
)page_addr
<< chip
->page_shift
;
1267 if (command
== NAND_CMD_READID
|| command
== NAND_CMD_PARAM
||
1268 command
== NAND_CMD_RNDOUT
)
1270 /* Avoid propagating a negative, don't-care address */
1271 else if (page_addr
< 0)
1274 dev_dbg(ctrl
->dev
, "cmd 0x%x addr 0x%llx\n", command
,
1275 (unsigned long long)addr
);
1277 host
->last_cmd
= command
;
1278 host
->last_byte
= 0;
1279 host
->last_addr
= addr
;
1282 case NAND_CMD_RESET
:
1283 native_cmd
= CMD_FLASH_RESET
;
1285 case NAND_CMD_STATUS
:
1286 native_cmd
= CMD_STATUS_READ
;
1288 case NAND_CMD_READID
:
1289 native_cmd
= CMD_DEVICE_ID_READ
;
1291 case NAND_CMD_READOOB
:
1292 native_cmd
= CMD_SPARE_AREA_READ
;
1294 case NAND_CMD_ERASE1
:
1295 native_cmd
= CMD_BLOCK_ERASE
;
1296 brcmnand_wp(mtd
, 0);
1298 case NAND_CMD_PARAM
:
1299 native_cmd
= CMD_PARAMETER_READ
;
1301 case NAND_CMD_SET_FEATURES
:
1302 case NAND_CMD_GET_FEATURES
:
1303 brcmnand_low_level_op(host
, LL_OP_CMD
, command
, false);
1304 brcmnand_low_level_op(host
, LL_OP_ADDR
, column
, false);
1306 case NAND_CMD_RNDOUT
:
1307 native_cmd
= CMD_PARAMETER_CHANGE_COL
;
1308 addr
&= ~((u64
)(FC_BYTES
- 1));
1310 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
1311 * NB: hwcfg.sector_size_1k may not be initialized yet
1313 if (brcmnand_get_sector_size_1k(host
)) {
1314 host
->hwcfg
.sector_size_1k
=
1315 brcmnand_get_sector_size_1k(host
);
1316 brcmnand_set_sector_size_1k(host
, 0);
1324 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1325 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1326 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1327 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
, lower_32_bits(addr
));
1328 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1330 brcmnand_send_cmd(host
, native_cmd
);
1331 brcmnand_waitfunc(mtd
, chip
);
1333 if (native_cmd
== CMD_PARAMETER_READ
||
1334 native_cmd
== CMD_PARAMETER_CHANGE_COL
) {
1335 /* Copy flash cache word-wise */
1336 u32
*flash_cache
= (u32
*)ctrl
->flash_cache
;
1339 brcmnand_soc_data_bus_prepare(ctrl
->soc
);
1342 * Must cache the FLASH_CACHE now, since changes in
1343 * SECTOR_SIZE_1K may invalidate it
1345 for (i
= 0; i
< FC_WORDS
; i
++)
1347 * Flash cache is big endian for parameter pages, at
1350 flash_cache
[i
] = be32_to_cpu(brcmnand_read_fc(ctrl
, i
));
1352 brcmnand_soc_data_bus_unprepare(ctrl
->soc
);
1354 /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
1355 if (host
->hwcfg
.sector_size_1k
)
1356 brcmnand_set_sector_size_1k(host
,
1357 host
->hwcfg
.sector_size_1k
);
1360 /* Re-enable protection is necessary only after erase */
1361 if (command
== NAND_CMD_ERASE1
)
1362 brcmnand_wp(mtd
, 1);
1365 static uint8_t brcmnand_read_byte(struct mtd_info
*mtd
)
1367 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1368 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1369 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1373 switch (host
->last_cmd
) {
1374 case NAND_CMD_READID
:
1375 if (host
->last_byte
< 4)
1376 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_ID
) >>
1377 (24 - (host
->last_byte
<< 3));
1378 else if (host
->last_byte
< 8)
1379 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_ID_EXT
) >>
1380 (56 - (host
->last_byte
<< 3));
1383 case NAND_CMD_READOOB
:
1384 ret
= oob_reg_read(ctrl
, host
->last_byte
);
1387 case NAND_CMD_STATUS
:
1388 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_INTFC_STATUS
) &
1390 if (wp_on
) /* hide WP status */
1391 ret
|= NAND_STATUS_WP
;
1394 case NAND_CMD_PARAM
:
1395 case NAND_CMD_RNDOUT
:
1396 addr
= host
->last_addr
+ host
->last_byte
;
1397 offs
= addr
& (FC_BYTES
- 1);
1399 /* At FC_BYTES boundary, switch to next column */
1400 if (host
->last_byte
> 0 && offs
== 0)
1401 chip
->cmdfunc(mtd
, NAND_CMD_RNDOUT
, addr
, -1);
1403 ret
= ctrl
->flash_cache
[offs
];
1405 case NAND_CMD_GET_FEATURES
:
1406 if (host
->last_byte
>= ONFI_SUBFEATURE_PARAM_LEN
) {
1409 bool last
= host
->last_byte
==
1410 ONFI_SUBFEATURE_PARAM_LEN
- 1;
1411 brcmnand_low_level_op(host
, LL_OP_RD
, 0, last
);
1412 ret
= brcmnand_read_reg(ctrl
, BRCMNAND_LL_RDATA
) & 0xff;
1416 dev_dbg(ctrl
->dev
, "read byte = 0x%02x\n", ret
);
1422 static void brcmnand_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
1426 for (i
= 0; i
< len
; i
++, buf
++)
1427 *buf
= brcmnand_read_byte(mtd
);
1430 static void brcmnand_write_buf(struct mtd_info
*mtd
, const uint8_t *buf
,
1434 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1435 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1437 switch (host
->last_cmd
) {
1438 case NAND_CMD_SET_FEATURES
:
1439 for (i
= 0; i
< len
; i
++)
1440 brcmnand_low_level_op(host
, LL_OP_WR
, buf
[i
],
1450 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
1451 * following ahead of time:
1452 * - Is this descriptor the beginning or end of a linked list?
1453 * - What is the (DMA) address of the next descriptor in the linked list?
1455 static int brcmnand_fill_dma_desc(struct brcmnand_host
*host
,
1456 struct brcm_nand_dma_desc
*desc
, u64 addr
,
1457 dma_addr_t buf
, u32 len
, u8 dma_cmd
,
1458 bool begin
, bool end
,
1459 dma_addr_t next_desc
)
1461 memset(desc
, 0, sizeof(*desc
));
1462 /* Descriptors are written in native byte order (wordwise) */
1463 desc
->next_desc
= lower_32_bits(next_desc
);
1464 desc
->next_desc_ext
= upper_32_bits(next_desc
);
1465 desc
->cmd_irq
= (dma_cmd
<< 24) |
1466 (end
? (0x03 << 8) : 0) | /* IRQ | STOP */
1467 (!!begin
) | ((!!end
) << 1); /* head, tail */
1468 #ifdef CONFIG_CPU_BIG_ENDIAN
1469 desc
->cmd_irq
|= 0x01 << 12;
1471 desc
->dram_addr
= lower_32_bits(buf
);
1472 desc
->dram_addr_ext
= upper_32_bits(buf
);
1473 desc
->tfr_len
= len
;
1474 desc
->total_len
= len
;
1475 desc
->flash_addr
= lower_32_bits(addr
);
1476 desc
->flash_addr_ext
= upper_32_bits(addr
);
1477 desc
->cs
= host
->cs
;
1478 desc
->status_valid
= 0x01;
1483 * Kick the FLASH_DMA engine, with a given DMA descriptor
1485 static void brcmnand_dma_run(struct brcmnand_host
*host
, dma_addr_t desc
)
1487 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1488 unsigned long timeo
= msecs_to_jiffies(100);
1490 flash_dma_writel(ctrl
, FLASH_DMA_FIRST_DESC
, lower_32_bits(desc
));
1491 (void)flash_dma_readl(ctrl
, FLASH_DMA_FIRST_DESC
);
1492 flash_dma_writel(ctrl
, FLASH_DMA_FIRST_DESC_EXT
, upper_32_bits(desc
));
1493 (void)flash_dma_readl(ctrl
, FLASH_DMA_FIRST_DESC_EXT
);
1495 /* Start FLASH_DMA engine */
1496 ctrl
->dma_pending
= true;
1497 mb(); /* flush previous writes */
1498 flash_dma_writel(ctrl
, FLASH_DMA_CTRL
, 0x03); /* wake | run */
1500 if (wait_for_completion_timeout(&ctrl
->dma_done
, timeo
) <= 0) {
1502 "timeout waiting for DMA; status %#x, error status %#x\n",
1503 flash_dma_readl(ctrl
, FLASH_DMA_STATUS
),
1504 flash_dma_readl(ctrl
, FLASH_DMA_ERROR_STATUS
));
1506 ctrl
->dma_pending
= false;
1507 flash_dma_writel(ctrl
, FLASH_DMA_CTRL
, 0); /* force stop */
1510 static int brcmnand_dma_trans(struct brcmnand_host
*host
, u64 addr
, u32
*buf
,
1511 u32 len
, u8 dma_cmd
)
1513 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1515 int dir
= dma_cmd
== CMD_PAGE_READ
? DMA_FROM_DEVICE
: DMA_TO_DEVICE
;
1517 buf_pa
= dma_map_single(ctrl
->dev
, buf
, len
, dir
);
1518 if (dma_mapping_error(ctrl
->dev
, buf_pa
)) {
1519 dev_err(ctrl
->dev
, "unable to map buffer for DMA\n");
1523 brcmnand_fill_dma_desc(host
, ctrl
->dma_desc
, addr
, buf_pa
, len
,
1524 dma_cmd
, true, true, 0);
1526 brcmnand_dma_run(host
, ctrl
->dma_pa
);
1528 dma_unmap_single(ctrl
->dev
, buf_pa
, len
, dir
);
1530 if (ctrl
->dma_desc
->status_valid
& FLASH_DMA_ECC_ERROR
)
1532 else if (ctrl
->dma_desc
->status_valid
& FLASH_DMA_CORR_ERROR
)
1539 * Assumes proper CS is already set
1541 static int brcmnand_read_by_pio(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1542 u64 addr
, unsigned int trans
, u32
*buf
,
1543 u8
*oob
, u64
*err_addr
)
1545 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1546 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1549 /* Clear error addresses */
1550 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_ADDR
, 0);
1551 brcmnand_write_reg(ctrl
, BRCMNAND_CORR_ADDR
, 0);
1552 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_EXT_ADDR
, 0);
1553 brcmnand_write_reg(ctrl
, BRCMNAND_CORR_EXT_ADDR
, 0);
1555 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1556 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1557 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1559 for (i
= 0; i
< trans
; i
++, addr
+= FC_BYTES
) {
1560 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
,
1561 lower_32_bits(addr
));
1562 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1563 /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
1564 brcmnand_send_cmd(host
, CMD_PAGE_READ
);
1565 brcmnand_waitfunc(mtd
, chip
);
1568 brcmnand_soc_data_bus_prepare(ctrl
->soc
);
1570 for (j
= 0; j
< FC_WORDS
; j
++, buf
++)
1571 *buf
= brcmnand_read_fc(ctrl
, j
);
1573 brcmnand_soc_data_bus_unprepare(ctrl
->soc
);
1577 oob
+= read_oob_from_regs(ctrl
, i
, oob
,
1578 mtd
->oobsize
/ trans
,
1579 host
->hwcfg
.sector_size_1k
);
1582 *err_addr
= brcmnand_read_reg(ctrl
,
1583 BRCMNAND_UNCORR_ADDR
) |
1584 ((u64
)(brcmnand_read_reg(ctrl
,
1585 BRCMNAND_UNCORR_EXT_ADDR
)
1592 *err_addr
= brcmnand_read_reg(ctrl
,
1593 BRCMNAND_CORR_ADDR
) |
1594 ((u64
)(brcmnand_read_reg(ctrl
,
1595 BRCMNAND_CORR_EXT_ADDR
)
1606 * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
1609 * Because the HW ECC signals an ECC error if an erase paged has even a single
1610 * bitflip, we must check each ECC error to see if it is actually an erased
1611 * page with bitflips, not a truly corrupted page.
1613 * On a real error, return a negative error code (-EBADMSG for ECC error), and
1614 * buf will contain raw data.
1615 * Otherwise, buf gets filled with 0xffs and return the maximum number of
1616 * bitflips-per-ECC-sector to the caller.
1619 static int brcmstb_nand_verify_erased_page(struct mtd_info
*mtd
,
1620 struct nand_chip
*chip
, void *buf
, u64 addr
)
1623 void *oob
= chip
->oob_poi
;
1625 int page
= addr
>> chip
->page_shift
;
1629 buf
= chip
->buffers
->databuf
;
1630 /* Invalidate page cache */
1634 sas
= mtd
->oobsize
/ chip
->ecc
.steps
;
1636 /* read without ecc for verification */
1637 chip
->cmdfunc(mtd
, NAND_CMD_READ0
, 0x00, page
);
1638 ret
= chip
->ecc
.read_page_raw(mtd
, chip
, buf
, true, page
);
1642 for (i
= 0; i
< chip
->ecc
.steps
; i
++, oob
+= sas
) {
1643 ret
= nand_check_erased_ecc_chunk(buf
, chip
->ecc
.size
,
1645 chip
->ecc
.strength
);
1649 bitflips
= max(bitflips
, ret
);
1655 static int brcmnand_read(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1656 u64 addr
, unsigned int trans
, u32
*buf
, u8
*oob
)
1658 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1659 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1664 dev_dbg(ctrl
->dev
, "read %llx -> %p\n", (unsigned long long)addr
, buf
);
1667 brcmnand_write_reg(ctrl
, BRCMNAND_UNCORR_COUNT
, 0);
1669 if (has_flash_dma(ctrl
) && !oob
&& flash_dma_buf_ok(buf
)) {
1670 err
= brcmnand_dma_trans(host
, addr
, buf
, trans
* FC_BYTES
,
1673 if (mtd_is_bitflip_or_eccerr(err
))
1680 memset(oob
, 0x99, mtd
->oobsize
);
1682 err
= brcmnand_read_by_pio(mtd
, chip
, addr
, trans
, buf
,
1686 if (mtd_is_eccerr(err
)) {
1688 * On controller version and 7.0, 7.1 , DMA read after a
1689 * prior PIO read that reported uncorrectable error,
1690 * the DMA engine captures this error following DMA read
1691 * cleared only on subsequent DMA read, so just retry once
1692 * to clear a possible false error reported for current DMA
1695 if ((ctrl
->nand_version
== 0x0700) ||
1696 (ctrl
->nand_version
== 0x0701)) {
1704 * Controller version 7.2 has hw encoder to detect erased page
1705 * bitflips, apply sw verification for older controllers only
1707 if (ctrl
->nand_version
< 0x0702) {
1708 err
= brcmstb_nand_verify_erased_page(mtd
, chip
, buf
,
1710 /* erased page bitflips corrected */
1715 dev_dbg(ctrl
->dev
, "uncorrectable error at 0x%llx\n",
1716 (unsigned long long)err_addr
);
1717 mtd
->ecc_stats
.failed
++;
1718 /* NAND layer expects zero on ECC errors */
1722 if (mtd_is_bitflip(err
)) {
1723 unsigned int corrected
= brcmnand_count_corrected(ctrl
);
1725 dev_dbg(ctrl
->dev
, "corrected error at 0x%llx\n",
1726 (unsigned long long)err_addr
);
1727 mtd
->ecc_stats
.corrected
+= corrected
;
1728 /* Always exceed the software-imposed threshold */
1729 return max(mtd
->bitflip_threshold
, corrected
);
1735 static int brcmnand_read_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1736 uint8_t *buf
, int oob_required
, int page
)
1738 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1739 u8
*oob
= oob_required
? (u8
*)chip
->oob_poi
: NULL
;
1741 return brcmnand_read(mtd
, chip
, host
->last_addr
,
1742 mtd
->writesize
>> FC_SHIFT
, (u32
*)buf
, oob
);
1745 static int brcmnand_read_page_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1746 uint8_t *buf
, int oob_required
, int page
)
1748 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1749 u8
*oob
= oob_required
? (u8
*)chip
->oob_poi
: NULL
;
1752 brcmnand_set_ecc_enabled(host
, 0);
1753 ret
= brcmnand_read(mtd
, chip
, host
->last_addr
,
1754 mtd
->writesize
>> FC_SHIFT
, (u32
*)buf
, oob
);
1755 brcmnand_set_ecc_enabled(host
, 1);
1759 static int brcmnand_read_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1762 return brcmnand_read(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1763 mtd
->writesize
>> FC_SHIFT
,
1764 NULL
, (u8
*)chip
->oob_poi
);
1767 static int brcmnand_read_oob_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1770 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1772 brcmnand_set_ecc_enabled(host
, 0);
1773 brcmnand_read(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1774 mtd
->writesize
>> FC_SHIFT
,
1775 NULL
, (u8
*)chip
->oob_poi
);
1776 brcmnand_set_ecc_enabled(host
, 1);
1780 static int brcmnand_write(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1781 u64 addr
, const u32
*buf
, u8
*oob
)
1783 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1784 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1785 unsigned int i
, j
, trans
= mtd
->writesize
>> FC_SHIFT
;
1786 int status
, ret
= 0;
1788 dev_dbg(ctrl
->dev
, "write %llx <- %p\n", (unsigned long long)addr
, buf
);
1790 if (unlikely((unsigned long)buf
& 0x03)) {
1791 dev_warn(ctrl
->dev
, "unaligned buffer: %p\n", buf
);
1792 buf
= (u32
*)((unsigned long)buf
& ~0x03);
1795 brcmnand_wp(mtd
, 0);
1797 for (i
= 0; i
< ctrl
->max_oob
; i
+= 4)
1798 oob_reg_write(ctrl
, i
, 0xffffffff);
1800 if (has_flash_dma(ctrl
) && !oob
&& flash_dma_buf_ok(buf
)) {
1801 if (brcmnand_dma_trans(host
, addr
, (u32
*)buf
,
1802 mtd
->writesize
, CMD_PROGRAM_PAGE
))
1807 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
,
1808 (host
->cs
<< 16) | ((addr
>> 32) & 0xffff));
1809 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_EXT_ADDRESS
);
1811 for (i
= 0; i
< trans
; i
++, addr
+= FC_BYTES
) {
1812 /* full address MUST be set before populating FC */
1813 brcmnand_write_reg(ctrl
, BRCMNAND_CMD_ADDRESS
,
1814 lower_32_bits(addr
));
1815 (void)brcmnand_read_reg(ctrl
, BRCMNAND_CMD_ADDRESS
);
1818 brcmnand_soc_data_bus_prepare(ctrl
->soc
);
1820 for (j
= 0; j
< FC_WORDS
; j
++, buf
++)
1821 brcmnand_write_fc(ctrl
, j
, *buf
);
1823 brcmnand_soc_data_bus_unprepare(ctrl
->soc
);
1825 for (j
= 0; j
< FC_WORDS
; j
++)
1826 brcmnand_write_fc(ctrl
, j
, 0xffffffff);
1830 oob
+= write_oob_to_regs(ctrl
, i
, oob
,
1831 mtd
->oobsize
/ trans
,
1832 host
->hwcfg
.sector_size_1k
);
1835 /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
1836 brcmnand_send_cmd(host
, CMD_PROGRAM_PAGE
);
1837 status
= brcmnand_waitfunc(mtd
, chip
);
1839 if (status
& NAND_STATUS_FAIL
) {
1840 dev_info(ctrl
->dev
, "program failed at %llx\n",
1841 (unsigned long long)addr
);
1847 brcmnand_wp(mtd
, 1);
1851 static int brcmnand_write_page(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1852 const uint8_t *buf
, int oob_required
, int page
)
1854 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1855 void *oob
= oob_required
? chip
->oob_poi
: NULL
;
1857 brcmnand_write(mtd
, chip
, host
->last_addr
, (const u32
*)buf
, oob
);
1861 static int brcmnand_write_page_raw(struct mtd_info
*mtd
,
1862 struct nand_chip
*chip
, const uint8_t *buf
,
1863 int oob_required
, int page
)
1865 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1866 void *oob
= oob_required
? chip
->oob_poi
: NULL
;
1868 brcmnand_set_ecc_enabled(host
, 0);
1869 brcmnand_write(mtd
, chip
, host
->last_addr
, (const u32
*)buf
, oob
);
1870 brcmnand_set_ecc_enabled(host
, 1);
1874 static int brcmnand_write_oob(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1877 return brcmnand_write(mtd
, chip
, (u64
)page
<< chip
->page_shift
,
1878 NULL
, chip
->oob_poi
);
1881 static int brcmnand_write_oob_raw(struct mtd_info
*mtd
, struct nand_chip
*chip
,
1884 struct brcmnand_host
*host
= nand_get_controller_data(chip
);
1887 brcmnand_set_ecc_enabled(host
, 0);
1888 ret
= brcmnand_write(mtd
, chip
, (u64
)page
<< chip
->page_shift
, NULL
,
1889 (u8
*)chip
->oob_poi
);
1890 brcmnand_set_ecc_enabled(host
, 1);
1895 /***********************************************************************
1896 * Per-CS setup (1 NAND device)
1897 ***********************************************************************/
1899 static int brcmnand_set_cfg(struct brcmnand_host
*host
,
1900 struct brcmnand_cfg
*cfg
)
1902 struct brcmnand_controller
*ctrl
= host
->ctrl
;
1903 struct nand_chip
*chip
= &host
->chip
;
1904 u16 cfg_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_CFG
);
1905 u16 cfg_ext_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
1906 BRCMNAND_CS_CFG_EXT
);
1907 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
1908 BRCMNAND_CS_ACC_CONTROL
);
1909 u8 block_size
= 0, page_size
= 0, device_size
= 0;
1912 if (ctrl
->block_sizes
) {
1915 for (i
= 0, found
= 0; ctrl
->block_sizes
[i
]; i
++)
1916 if (ctrl
->block_sizes
[i
] * 1024 == cfg
->block_size
) {
1921 dev_warn(ctrl
->dev
, "invalid block size %u\n",
1926 block_size
= ffs(cfg
->block_size
) - ffs(BRCMNAND_MIN_BLOCKSIZE
);
1929 if (cfg
->block_size
< BRCMNAND_MIN_BLOCKSIZE
|| (ctrl
->max_block_size
&&
1930 cfg
->block_size
> ctrl
->max_block_size
)) {
1931 dev_warn(ctrl
->dev
, "invalid block size %u\n",
1936 if (ctrl
->page_sizes
) {
1939 for (i
= 0, found
= 0; ctrl
->page_sizes
[i
]; i
++)
1940 if (ctrl
->page_sizes
[i
] == cfg
->page_size
) {
1945 dev_warn(ctrl
->dev
, "invalid page size %u\n",
1950 page_size
= ffs(cfg
->page_size
) - ffs(BRCMNAND_MIN_PAGESIZE
);
1953 if (cfg
->page_size
< BRCMNAND_MIN_PAGESIZE
|| (ctrl
->max_page_size
&&
1954 cfg
->page_size
> ctrl
->max_page_size
)) {
1955 dev_warn(ctrl
->dev
, "invalid page size %u\n", cfg
->page_size
);
1959 if (fls64(cfg
->device_size
) < fls64(BRCMNAND_MIN_DEVSIZE
)) {
1960 dev_warn(ctrl
->dev
, "invalid device size 0x%llx\n",
1961 (unsigned long long)cfg
->device_size
);
1964 device_size
= fls64(cfg
->device_size
) - fls64(BRCMNAND_MIN_DEVSIZE
);
1966 tmp
= (cfg
->blk_adr_bytes
<< CFG_BLK_ADR_BYTES_SHIFT
) |
1967 (cfg
->col_adr_bytes
<< CFG_COL_ADR_BYTES_SHIFT
) |
1968 (cfg
->ful_adr_bytes
<< CFG_FUL_ADR_BYTES_SHIFT
) |
1969 (!!(cfg
->device_width
== 16) << CFG_BUS_WIDTH_SHIFT
) |
1970 (device_size
<< CFG_DEVICE_SIZE_SHIFT
);
1971 if (cfg_offs
== cfg_ext_offs
) {
1972 tmp
|= (page_size
<< CFG_PAGE_SIZE_SHIFT
) |
1973 (block_size
<< CFG_BLK_SIZE_SHIFT
);
1974 nand_writereg(ctrl
, cfg_offs
, tmp
);
1976 nand_writereg(ctrl
, cfg_offs
, tmp
);
1977 tmp
= (page_size
<< CFG_EXT_PAGE_SIZE_SHIFT
) |
1978 (block_size
<< CFG_EXT_BLK_SIZE_SHIFT
);
1979 nand_writereg(ctrl
, cfg_ext_offs
, tmp
);
1982 tmp
= nand_readreg(ctrl
, acc_control_offs
);
1983 tmp
&= ~brcmnand_ecc_level_mask(ctrl
);
1984 tmp
|= cfg
->ecc_level
<< NAND_ACC_CONTROL_ECC_SHIFT
;
1985 tmp
&= ~brcmnand_spare_area_mask(ctrl
);
1986 tmp
|= cfg
->spare_area_size
;
1987 nand_writereg(ctrl
, acc_control_offs
, tmp
);
1989 brcmnand_set_sector_size_1k(host
, cfg
->sector_size_1k
);
1991 /* threshold = ceil(BCH-level * 0.75) */
1992 brcmnand_wr_corr_thresh(host
, DIV_ROUND_UP(chip
->ecc
.strength
* 3, 4));
1997 static void brcmnand_print_cfg(struct brcmnand_host
*host
,
1998 char *buf
, struct brcmnand_cfg
*cfg
)
2001 "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
2002 (unsigned long long)cfg
->device_size
>> 20,
2003 cfg
->block_size
>> 10,
2004 cfg
->page_size
>= 1024 ? cfg
->page_size
>> 10 : cfg
->page_size
,
2005 cfg
->page_size
>= 1024 ? "KiB" : "B",
2006 cfg
->spare_area_size
, cfg
->device_width
);
2008 /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
2009 if (is_hamming_ecc(host
->ctrl
, cfg
))
2010 sprintf(buf
, ", Hamming ECC");
2011 else if (cfg
->sector_size_1k
)
2012 sprintf(buf
, ", BCH-%u (1KiB sector)", cfg
->ecc_level
<< 1);
2014 sprintf(buf
, ", BCH-%u", cfg
->ecc_level
);
2018 * Minimum number of bytes to address a page. Calculated as:
2019 * roundup(log2(size / page-size) / 8)
2021 * NB: the following does not "round up" for non-power-of-2 'size'; but this is
2022 * OK because many other things will break if 'size' is irregular...
2024 static inline int get_blk_adr_bytes(u64 size
, u32 writesize
)
2026 return ALIGN(ilog2(size
) - ilog2(writesize
), 8) >> 3;
2029 static int brcmnand_setup_dev(struct brcmnand_host
*host
)
2031 struct mtd_info
*mtd
= nand_to_mtd(&host
->chip
);
2032 struct nand_chip
*chip
= &host
->chip
;
2033 struct brcmnand_controller
*ctrl
= host
->ctrl
;
2034 struct brcmnand_cfg
*cfg
= &host
->hwcfg
;
2036 u32 offs
, tmp
, oob_sector
;
2039 memset(cfg
, 0, sizeof(*cfg
));
2041 ret
= of_property_read_u32(nand_get_flash_node(chip
),
2042 "brcm,nand-oob-sector-size",
2045 /* Use detected size */
2046 cfg
->spare_area_size
= mtd
->oobsize
/
2047 (mtd
->writesize
>> FC_SHIFT
);
2049 cfg
->spare_area_size
= oob_sector
;
2051 if (cfg
->spare_area_size
> ctrl
->max_oob
)
2052 cfg
->spare_area_size
= ctrl
->max_oob
;
2054 * Set oobsize to be consistent with controller's spare_area_size, as
2055 * the rest is inaccessible.
2057 mtd
->oobsize
= cfg
->spare_area_size
* (mtd
->writesize
>> FC_SHIFT
);
2059 cfg
->device_size
= mtd
->size
;
2060 cfg
->block_size
= mtd
->erasesize
;
2061 cfg
->page_size
= mtd
->writesize
;
2062 cfg
->device_width
= (chip
->options
& NAND_BUSWIDTH_16
) ? 16 : 8;
2063 cfg
->col_adr_bytes
= 2;
2064 cfg
->blk_adr_bytes
= get_blk_adr_bytes(mtd
->size
, mtd
->writesize
);
2066 if (chip
->ecc
.mode
!= NAND_ECC_HW
) {
2067 dev_err(ctrl
->dev
, "only HW ECC supported; selected: %d\n",
2072 if (chip
->ecc
.algo
== NAND_ECC_UNKNOWN
) {
2073 if (chip
->ecc
.strength
== 1 && chip
->ecc
.size
== 512)
2074 /* Default to Hamming for 1-bit ECC, if unspecified */
2075 chip
->ecc
.algo
= NAND_ECC_HAMMING
;
2077 /* Otherwise, BCH */
2078 chip
->ecc
.algo
= NAND_ECC_BCH
;
2081 if (chip
->ecc
.algo
== NAND_ECC_HAMMING
&& (chip
->ecc
.strength
!= 1 ||
2082 chip
->ecc
.size
!= 512)) {
2083 dev_err(ctrl
->dev
, "invalid Hamming params: %d bits per %d bytes\n",
2084 chip
->ecc
.strength
, chip
->ecc
.size
);
2088 switch (chip
->ecc
.size
) {
2090 if (chip
->ecc
.algo
== NAND_ECC_HAMMING
)
2091 cfg
->ecc_level
= 15;
2093 cfg
->ecc_level
= chip
->ecc
.strength
;
2094 cfg
->sector_size_1k
= 0;
2097 if (!(ctrl
->features
& BRCMNAND_HAS_1K_SECTORS
)) {
2098 dev_err(ctrl
->dev
, "1KB sectors not supported\n");
2101 if (chip
->ecc
.strength
& 0x1) {
2103 "odd ECC not supported with 1KB sectors\n");
2107 cfg
->ecc_level
= chip
->ecc
.strength
>> 1;
2108 cfg
->sector_size_1k
= 1;
2111 dev_err(ctrl
->dev
, "unsupported ECC size: %d\n",
2116 cfg
->ful_adr_bytes
= cfg
->blk_adr_bytes
;
2117 if (mtd
->writesize
> 512)
2118 cfg
->ful_adr_bytes
+= cfg
->col_adr_bytes
;
2120 cfg
->ful_adr_bytes
+= 1;
2122 ret
= brcmnand_set_cfg(host
, cfg
);
2126 brcmnand_set_ecc_enabled(host
, 1);
2128 brcmnand_print_cfg(host
, msg
, cfg
);
2129 dev_info(ctrl
->dev
, "detected %s\n", msg
);
2131 /* Configure ACC_CONTROL */
2132 offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_ACC_CONTROL
);
2133 tmp
= nand_readreg(ctrl
, offs
);
2134 tmp
&= ~ACC_CONTROL_PARTIAL_PAGE
;
2135 tmp
&= ~ACC_CONTROL_RD_ERASED
;
2137 /* We need to turn on Read from erased paged protected by ECC */
2138 if (ctrl
->nand_version
>= 0x0702)
2139 tmp
|= ACC_CONTROL_RD_ERASED
;
2140 tmp
&= ~ACC_CONTROL_FAST_PGM_RDIN
;
2141 if (ctrl
->features
& BRCMNAND_HAS_PREFETCH
) {
2143 * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
2146 if (has_flash_dma(ctrl
))
2147 tmp
&= ~ACC_CONTROL_PREFETCH
;
2149 tmp
|= ACC_CONTROL_PREFETCH
;
2151 nand_writereg(ctrl
, offs
, tmp
);
2156 static int brcmnand_init_cs(struct brcmnand_host
*host
, struct device_node
*dn
)
2158 struct brcmnand_controller
*ctrl
= host
->ctrl
;
2159 struct platform_device
*pdev
= host
->pdev
;
2160 struct mtd_info
*mtd
;
2161 struct nand_chip
*chip
;
2165 ret
= of_property_read_u32(dn
, "reg", &host
->cs
);
2167 dev_err(&pdev
->dev
, "can't get chip-select\n");
2171 mtd
= nand_to_mtd(&host
->chip
);
2174 nand_set_flash_node(chip
, dn
);
2175 nand_set_controller_data(chip
, host
);
2176 mtd
->name
= devm_kasprintf(&pdev
->dev
, GFP_KERNEL
, "brcmnand.%d",
2178 mtd
->owner
= THIS_MODULE
;
2179 mtd
->dev
.parent
= &pdev
->dev
;
2181 chip
->IO_ADDR_R
= (void __iomem
*)0xdeadbeef;
2182 chip
->IO_ADDR_W
= (void __iomem
*)0xdeadbeef;
2184 chip
->cmd_ctrl
= brcmnand_cmd_ctrl
;
2185 chip
->cmdfunc
= brcmnand_cmdfunc
;
2186 chip
->waitfunc
= brcmnand_waitfunc
;
2187 chip
->read_byte
= brcmnand_read_byte
;
2188 chip
->read_buf
= brcmnand_read_buf
;
2189 chip
->write_buf
= brcmnand_write_buf
;
2191 chip
->ecc
.mode
= NAND_ECC_HW
;
2192 chip
->ecc
.read_page
= brcmnand_read_page
;
2193 chip
->ecc
.write_page
= brcmnand_write_page
;
2194 chip
->ecc
.read_page_raw
= brcmnand_read_page_raw
;
2195 chip
->ecc
.write_page_raw
= brcmnand_write_page_raw
;
2196 chip
->ecc
.write_oob_raw
= brcmnand_write_oob_raw
;
2197 chip
->ecc
.read_oob_raw
= brcmnand_read_oob_raw
;
2198 chip
->ecc
.read_oob
= brcmnand_read_oob
;
2199 chip
->ecc
.write_oob
= brcmnand_write_oob
;
2201 chip
->controller
= &ctrl
->controller
;
2204 * The bootloader might have configured 16bit mode but
2205 * NAND READID command only works in 8bit mode. We force
2206 * 8bit mode here to ensure that NAND READID commands works.
2208 cfg_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_CFG
);
2209 nand_writereg(ctrl
, cfg_offs
,
2210 nand_readreg(ctrl
, cfg_offs
) & ~CFG_BUS_WIDTH
);
2212 if (nand_scan_ident(mtd
, 1, NULL
))
2215 chip
->options
|= NAND_NO_SUBPAGE_WRITE
;
2217 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
2218 * to/from, and have nand_base pass us a bounce buffer instead, as
2221 chip
->options
|= NAND_USE_BOUNCE_BUFFER
;
2223 if (chip
->bbt_options
& NAND_BBT_USE_FLASH
)
2224 chip
->bbt_options
|= NAND_BBT_NO_OOB
;
2226 if (brcmnand_setup_dev(host
))
2229 chip
->ecc
.size
= host
->hwcfg
.sector_size_1k
? 1024 : 512;
2230 /* only use our internal HW threshold */
2231 mtd
->bitflip_threshold
= 1;
2233 ret
= brcmstb_choose_ecc_layout(host
);
2237 if (nand_scan_tail(mtd
))
2240 return mtd_device_register(mtd
, NULL
, 0);
2243 static void brcmnand_save_restore_cs_config(struct brcmnand_host
*host
,
2246 struct brcmnand_controller
*ctrl
= host
->ctrl
;
2247 u16 cfg_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_CFG
);
2248 u16 cfg_ext_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
2249 BRCMNAND_CS_CFG_EXT
);
2250 u16 acc_control_offs
= brcmnand_cs_offset(ctrl
, host
->cs
,
2251 BRCMNAND_CS_ACC_CONTROL
);
2252 u16 t1_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_TIMING1
);
2253 u16 t2_offs
= brcmnand_cs_offset(ctrl
, host
->cs
, BRCMNAND_CS_TIMING2
);
2256 nand_writereg(ctrl
, cfg_offs
, host
->hwcfg
.config
);
2257 if (cfg_offs
!= cfg_ext_offs
)
2258 nand_writereg(ctrl
, cfg_ext_offs
,
2259 host
->hwcfg
.config_ext
);
2260 nand_writereg(ctrl
, acc_control_offs
, host
->hwcfg
.acc_control
);
2261 nand_writereg(ctrl
, t1_offs
, host
->hwcfg
.timing_1
);
2262 nand_writereg(ctrl
, t2_offs
, host
->hwcfg
.timing_2
);
2264 host
->hwcfg
.config
= nand_readreg(ctrl
, cfg_offs
);
2265 if (cfg_offs
!= cfg_ext_offs
)
2266 host
->hwcfg
.config_ext
=
2267 nand_readreg(ctrl
, cfg_ext_offs
);
2268 host
->hwcfg
.acc_control
= nand_readreg(ctrl
, acc_control_offs
);
2269 host
->hwcfg
.timing_1
= nand_readreg(ctrl
, t1_offs
);
2270 host
->hwcfg
.timing_2
= nand_readreg(ctrl
, t2_offs
);
2274 static int brcmnand_suspend(struct device
*dev
)
2276 struct brcmnand_controller
*ctrl
= dev_get_drvdata(dev
);
2277 struct brcmnand_host
*host
;
2279 list_for_each_entry(host
, &ctrl
->host_list
, node
)
2280 brcmnand_save_restore_cs_config(host
, 0);
2282 ctrl
->nand_cs_nand_select
= brcmnand_read_reg(ctrl
, BRCMNAND_CS_SELECT
);
2283 ctrl
->nand_cs_nand_xor
= brcmnand_read_reg(ctrl
, BRCMNAND_CS_XOR
);
2284 ctrl
->corr_stat_threshold
=
2285 brcmnand_read_reg(ctrl
, BRCMNAND_CORR_THRESHOLD
);
2287 if (has_flash_dma(ctrl
))
2288 ctrl
->flash_dma_mode
= flash_dma_readl(ctrl
, FLASH_DMA_MODE
);
2293 static int brcmnand_resume(struct device
*dev
)
2295 struct brcmnand_controller
*ctrl
= dev_get_drvdata(dev
);
2296 struct brcmnand_host
*host
;
2298 if (has_flash_dma(ctrl
)) {
2299 flash_dma_writel(ctrl
, FLASH_DMA_MODE
, ctrl
->flash_dma_mode
);
2300 flash_dma_writel(ctrl
, FLASH_DMA_ERROR_STATUS
, 0);
2303 brcmnand_write_reg(ctrl
, BRCMNAND_CS_SELECT
, ctrl
->nand_cs_nand_select
);
2304 brcmnand_write_reg(ctrl
, BRCMNAND_CS_XOR
, ctrl
->nand_cs_nand_xor
);
2305 brcmnand_write_reg(ctrl
, BRCMNAND_CORR_THRESHOLD
,
2306 ctrl
->corr_stat_threshold
);
2308 /* Clear/re-enable interrupt */
2309 ctrl
->soc
->ctlrdy_ack(ctrl
->soc
);
2310 ctrl
->soc
->ctlrdy_set_enabled(ctrl
->soc
, true);
2313 list_for_each_entry(host
, &ctrl
->host_list
, node
) {
2314 struct nand_chip
*chip
= &host
->chip
;
2315 struct mtd_info
*mtd
= nand_to_mtd(chip
);
2317 brcmnand_save_restore_cs_config(host
, 1);
2319 /* Reset the chip, required by some chips after power-up */
2320 chip
->cmdfunc(mtd
, NAND_CMD_RESET
, -1, -1);
2326 const struct dev_pm_ops brcmnand_pm_ops
= {
2327 .suspend
= brcmnand_suspend
,
2328 .resume
= brcmnand_resume
,
2330 EXPORT_SYMBOL_GPL(brcmnand_pm_ops
);
2332 static const struct of_device_id brcmnand_of_match
[] = {
2333 { .compatible
= "brcm,brcmnand-v4.0" },
2334 { .compatible
= "brcm,brcmnand-v5.0" },
2335 { .compatible
= "brcm,brcmnand-v6.0" },
2336 { .compatible
= "brcm,brcmnand-v6.1" },
2337 { .compatible
= "brcm,brcmnand-v6.2" },
2338 { .compatible
= "brcm,brcmnand-v7.0" },
2339 { .compatible
= "brcm,brcmnand-v7.1" },
2340 { .compatible
= "brcm,brcmnand-v7.2" },
2343 MODULE_DEVICE_TABLE(of
, brcmnand_of_match
);
/***********************************************************************
 * Platform driver setup (per controller)
 ***********************************************************************/
2349 int brcmnand_probe(struct platform_device
*pdev
, struct brcmnand_soc
*soc
)
2351 struct device
*dev
= &pdev
->dev
;
2352 struct device_node
*dn
= dev
->of_node
, *child
;
2353 struct brcmnand_controller
*ctrl
;
2354 struct resource
*res
;
2357 /* We only support device-tree instantiation */
2361 if (!of_match_node(brcmnand_of_match
, dn
))
2364 ctrl
= devm_kzalloc(dev
, sizeof(*ctrl
), GFP_KERNEL
);
2368 dev_set_drvdata(dev
, ctrl
);
2371 init_completion(&ctrl
->done
);
2372 init_completion(&ctrl
->dma_done
);
2373 spin_lock_init(&ctrl
->controller
.lock
);
2374 init_waitqueue_head(&ctrl
->controller
.wq
);
2375 INIT_LIST_HEAD(&ctrl
->host_list
);
2377 /* NAND register range */
2378 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2379 ctrl
->nand_base
= devm_ioremap_resource(dev
, res
);
2380 if (IS_ERR(ctrl
->nand_base
))
2381 return PTR_ERR(ctrl
->nand_base
);
2383 /* Enable clock before using NAND registers */
2384 ctrl
->clk
= devm_clk_get(dev
, "nand");
2385 if (!IS_ERR(ctrl
->clk
)) {
2386 ret
= clk_prepare_enable(ctrl
->clk
);
2390 ret
= PTR_ERR(ctrl
->clk
);
2391 if (ret
== -EPROBE_DEFER
)
2397 /* Initialize NAND revision */
2398 ret
= brcmnand_revision_init(ctrl
);
2403 * Most chips have this cache at a fixed offset within 'nand' block.
2404 * Some must specify this region separately.
2406 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "nand-cache");
2408 ctrl
->nand_fc
= devm_ioremap_resource(dev
, res
);
2409 if (IS_ERR(ctrl
->nand_fc
)) {
2410 ret
= PTR_ERR(ctrl
->nand_fc
);
2414 ctrl
->nand_fc
= ctrl
->nand_base
+
2415 ctrl
->reg_offsets
[BRCMNAND_FC_BASE
];
2419 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "flash-dma");
2421 ctrl
->flash_dma_base
= devm_ioremap_resource(dev
, res
);
2422 if (IS_ERR(ctrl
->flash_dma_base
)) {
2423 ret
= PTR_ERR(ctrl
->flash_dma_base
);
2427 flash_dma_writel(ctrl
, FLASH_DMA_MODE
, 1); /* linked-list */
2428 flash_dma_writel(ctrl
, FLASH_DMA_ERROR_STATUS
, 0);
2430 /* Allocate descriptor(s) */
2431 ctrl
->dma_desc
= dmam_alloc_coherent(dev
,
2432 sizeof(*ctrl
->dma_desc
),
2433 &ctrl
->dma_pa
, GFP_KERNEL
);
2434 if (!ctrl
->dma_desc
) {
2439 ctrl
->dma_irq
= platform_get_irq(pdev
, 1);
2440 if ((int)ctrl
->dma_irq
< 0) {
2441 dev_err(dev
, "missing FLASH_DMA IRQ\n");
2446 ret
= devm_request_irq(dev
, ctrl
->dma_irq
,
2447 brcmnand_dma_irq
, 0, DRV_NAME
,
2450 dev_err(dev
, "can't allocate IRQ %d: error %d\n",
2451 ctrl
->dma_irq
, ret
);
2455 dev_info(dev
, "enabling FLASH_DMA\n");
2458 /* Disable automatic device ID config, direct addressing */
2459 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_SELECT
,
2460 CS_SELECT_AUTO_DEVICE_ID_CFG
| 0xff, 0, 0);
2461 /* Disable XOR addressing */
2462 brcmnand_rmw_reg(ctrl
, BRCMNAND_CS_XOR
, 0xff, 0, 0);
2464 if (ctrl
->features
& BRCMNAND_HAS_WP
) {
2465 /* Permanently disable write protection */
2467 brcmnand_set_wp(ctrl
, false);
2473 ctrl
->irq
= platform_get_irq(pdev
, 0);
2474 if ((int)ctrl
->irq
< 0) {
2475 dev_err(dev
, "no IRQ defined\n");
2481 * Some SoCs integrate this controller (e.g., its interrupt bits) in
2487 ret
= devm_request_irq(dev
, ctrl
->irq
, brcmnand_irq
, 0,
2490 /* Enable interrupt */
2491 ctrl
->soc
->ctlrdy_ack(ctrl
->soc
);
2492 ctrl
->soc
->ctlrdy_set_enabled(ctrl
->soc
, true);
2494 /* Use standard interrupt infrastructure */
2495 ret
= devm_request_irq(dev
, ctrl
->irq
, brcmnand_ctlrdy_irq
, 0,
2499 dev_err(dev
, "can't allocate IRQ %d: error %d\n",
2504 for_each_available_child_of_node(dn
, child
) {
2505 if (of_device_is_compatible(child
, "brcm,nandcs")) {
2506 struct brcmnand_host
*host
;
2508 host
= devm_kzalloc(dev
, sizeof(*host
), GFP_KERNEL
);
2517 ret
= brcmnand_init_cs(host
, child
);
2519 devm_kfree(dev
, host
);
2520 continue; /* Try all chip-selects */
2523 list_add_tail(&host
->node
, &ctrl
->host_list
);
2527 /* No chip-selects could initialize properly */
2528 if (list_empty(&ctrl
->host_list
)) {
2536 clk_disable_unprepare(ctrl
->clk
);
2540 EXPORT_SYMBOL_GPL(brcmnand_probe
);
2542 int brcmnand_remove(struct platform_device
*pdev
)
2544 struct brcmnand_controller
*ctrl
= dev_get_drvdata(&pdev
->dev
);
2545 struct brcmnand_host
*host
;
2547 list_for_each_entry(host
, &ctrl
->host_list
, node
)
2548 nand_release(nand_to_mtd(&host
->chip
));
2550 clk_disable_unprepare(ctrl
->clk
);
2552 dev_set_drvdata(&pdev
->dev
, NULL
);
2556 EXPORT_SYMBOL_GPL(brcmnand_remove
);
2558 MODULE_LICENSE("GPL v2");
2559 MODULE_AUTHOR("Kevin Cernekee");
2560 MODULE_AUTHOR("Brian Norris");
2561 MODULE_DESCRIPTION("NAND driver for Broadcom chips");
2562 MODULE_ALIAS("platform:brcmnand");