include/linux/mtd/mtd.h
/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef __MTD_MTD_H__
#define __MTD_MTD_H__

#include <linux/types.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/device.h>

#include <mtd/mtd-abi.h>

#include <asm/div64.h>

#define MTD_CHAR_MAJOR 90
#define MTD_BLOCK_MAJOR 31

#define MTD_ERASE_PENDING	0x01
#define MTD_ERASING		0x02
#define MTD_ERASE_SUSPEND	0x04
#define MTD_ERASE_DONE		0x08
#define MTD_ERASE_FAILED	0x10

#define MTD_FAIL_ADDR_UNKNOWN -1LL

/*
 * If the erase fails, fail_addr might indicate exactly which block failed. If
 * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
 * or was not specific to any particular block.
 */
struct erase_info {
	struct mtd_info *mtd;
	uint64_t addr;
	uint64_t len;
	uint64_t fail_addr;
	u_long time;
	u_long retries;
	unsigned dev;
	unsigned cell;
	void (*callback) (struct erase_info *self);
	u_long priv;
	u_char state;
	struct erase_info *next;
};
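
/*
 * Example (illustrative sketch, not part of this header): performing a
 * synchronous erase of one eraseblock by waiting for the completion
 * callback. Assumes <linux/completion.h>; the helper names and the
 * completion-based wait are arbitrary choices -- mtdchar, for instance,
 * uses a wait queue for the same purpose.
 *
 *	static void erase_done(struct erase_info *instr)
 *	{
 *		complete((struct completion *)instr->priv);
 *	}
 *
 *	static int erase_one_block(struct mtd_info *mtd, loff_t ofs)
 *	{
 *		DECLARE_COMPLETION_ONSTACK(done);
 *		struct erase_info ei = {
 *			.mtd      = mtd,
 *			.addr     = ofs,
 *			.len      = mtd->erasesize,
 *			.callback = erase_done,
 *			.priv     = (u_long)&done,
 *		};
 *		int ret = mtd_erase(mtd, &ei);
 *
 *		if (ret)
 *			return ret;
 *		wait_for_completion(&done);
 *		if (ei.state == MTD_ERASE_FAILED) {
 *			if (ei.fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 *				pr_err("erase failed at %llx\n",
 *				       (unsigned long long)ei.fail_addr);
 *			return -EIO;
 *		}
 *		return 0;
 *	}
 */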

struct mtd_erase_region_info {
	uint64_t offset;	/* At which this region starts, from the beginning of the MTD */
	uint32_t erasesize;	/* For this region */
	uint32_t numblocks;	/* Number of blocks of erasesize in this region */
	unsigned long *lockmap;	/* If keeping bitmap of locks */
};
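
/*
 * Example (illustrative sketch): finding the erase size that applies at a
 * given offset on a device with variable erase regions. The helper name is
 * hypothetical.
 *
 *	static uint32_t erasesize_at(struct mtd_info *mtd, loff_t ofs)
 *	{
 *		int i;
 *
 *		for (i = 0; i < mtd->numeraseregions; i++) {
 *			struct mtd_erase_region_info *r = &mtd->eraseregions[i];
 *			uint64_t end = r->offset +
 *				       (uint64_t)r->erasesize * r->numblocks;
 *
 *			if (ofs >= r->offset && ofs < end)
 *				return r->erasesize;
 *		}
 *		return mtd->erasesize;	// uniform device
 *	}
 */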

/**
 * struct mtd_oob_ops - oob operation operands
 * @mode: operation mode
 *
 * @len: number of data bytes to write/read
 *
 * @retlen: number of data bytes written/read
 *
 * @ooblen: number of oob bytes to write/read
 * @oobretlen: number of oob bytes written/read
 * @ooboffs: offset of oob data in the oob area (only relevant when
 *           mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
 * @datbuf: data buffer - if NULL only oob data are read/written
 * @oobbuf: oob data buffer
 *
 * Note, it is allowed to read more than one OOB area at one go, but not write.
 * The interface assumes that the OOB write requests program only one page's
 * OOB area.
 */
struct mtd_oob_ops {
	unsigned int mode;
	size_t len;
	size_t retlen;
	size_t ooblen;
	size_t oobretlen;
	uint32_t ooboffs;
	uint8_t *datbuf;
	uint8_t *oobbuf;
};
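
/*
 * Example (illustrative sketch): reading the free (non-ECC) OOB bytes of
 * one page through the read_oob() method. MTD_OPS_AUTO_OOB is defined in
 * mtd-abi.h; page_addr and oob_buf are assumed to be provided by the
 * caller.
 *
 *	struct mtd_oob_ops ops = {
 *		.mode   = MTD_OPS_AUTO_OOB,
 *		.ooblen = mtd->oobavail,
 *		.oobbuf = oob_buf,
 *		.datbuf = NULL,		// OOB only, no main-area data
 *	};
 *	int ret = mtd->read_oob(mtd, page_addr, &ops);
 *	// on success, ops.oobretlen holds the number of OOB bytes read
 */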

#define MTD_MAX_OOBFREE_ENTRIES_LARGE	32
#define MTD_MAX_ECCPOS_ENTRIES_LARGE	448
/*
 * Internal ECC layout control structure. For historical reasons, there is a
 * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
 * for export to user-space via the ECCGETLAYOUT ioctl.
 * nand_ecclayout should be expandable in the future simply by increasing the
 * above macros.
 */
struct nand_ecclayout {
	__u32 eccbytes;
	__u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
	__u32 oobavail;
	struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
};
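
/*
 * Example (illustrative sketch): walking the oobfree[] entries of a layout
 * to count the free OOB bytes; the result normally matches the oobavail
 * field. The helper name is hypothetical.
 *
 *	static uint32_t count_oobfree(const struct nand_ecclayout *layout)
 *	{
 *		uint32_t i, bytes = 0;
 *
 *		for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE &&
 *			    layout->oobfree[i].length; i++)
 *			bytes += layout->oobfree[i].length;
 *		return bytes;
 *	}
 */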

struct module;	/* only needed for owner field in mtd_info */

struct mtd_info {
	u_char type;
	uint32_t flags;
	uint64_t size;	 // Total size of the MTD

	/* "Major" erase size for the device. Naïve users may take this
	 * to be the only erase size available, or may use the more detailed
	 * information below if they desire
	 */
	uint32_t erasesize;
	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
	 * though individual bits can be cleared), in case of NAND flash it is
	 * one NAND page (or half, or one-fourth of it), in case of ECC-ed NOR
	 * it is the ECC block size, etc. It is illegal to have writesize = 0.
	 * Any driver registering a struct mtd_info must ensure a writesize of
	 * 1 or larger.
	 */
	uint32_t writesize;

	/*
	 * Size of the write buffer used by the MTD. MTD devices having a write
	 * buffer can write multiple writesize chunks at a time. E.g. while
	 * writing 4 * writesize bytes to a device with 2 * writesize bytes
	 * buffer the MTD driver can (but doesn't have to) do 2 writesize
	 * operations, but not 4. Currently, all NANDs have writebufsize
	 * equivalent to writesize (NAND page size). Some NOR flashes do have
	 * writebufsize greater than writesize.
	 */
	uint32_t writebufsize;
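
	/*
	 * Example (illustrative, not a field): for a buffer of 'len' bytes,
	 * the number of program operations a driver may need lies between
	 * the two values below (DIV_ROUND_UP is from <linux/kernel.h>):
	 *
	 *	max_ops = DIV_ROUND_UP(len, mtd->writesize);
	 *	min_ops = DIV_ROUND_UP(len, mtd->writebufsize);
	 */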

	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
	uint32_t oobavail;  // Available OOB bytes per block

	/*
	 * If erasesize is a power of 2 then the shift is stored in
	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
	 */
	unsigned int erasesize_shift;
	unsigned int writesize_shift;
	/* Masks based on erasesize_shift and writesize_shift */
	unsigned int erasesize_mask;
	unsigned int writesize_mask;

	// Kernel-only stuff starts here.
	const char *name;
	int index;

	/* ECC layout structure pointer - read only! */
	struct nand_ecclayout *ecclayout;

	/* Data for variable erase regions. If numeraseregions is zero,
	 * it means that the whole device has erasesize as given above.
	 */
	int numeraseregions;
	struct mtd_erase_region_info *eraseregions;

	/*
	 * Do not call via these pointers, use corresponding mtd_*()
	 * wrappers instead.
	 */
	int (*erase) (struct mtd_info *mtd, struct erase_info *instr);

	/* This stuff for eXecute-In-Place */
	/* phys is optional and may be set to NULL */
	int (*point) (struct mtd_info *mtd, loff_t from, size_t len,
		      size_t *retlen, void **virt, resource_size_t *phys);

	/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
	void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len);

	/* Allow NOMMU mmap() to directly map the device (if not NULL)
	 * - return the address to which the offset maps
	 * - return -ENOSYS to indicate refusal to do the mapping
	 */
	unsigned long (*get_unmapped_area) (struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags);

	/* Backing device capabilities for this device
	 * - provides mmap capabilities
	 */
	struct backing_dev_info *backing_dev_info;


	int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
	int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);

	/* In blackbox flight-recorder-like scenarios we want to make
	   successful writes in interrupt context. panic_write() is only
	   intended to be called when it is known that the kernel is about to
	   panic and we need the write to succeed. Since the kernel is not
	   going to be running for much longer, this function can break locks
	   and delay to ensure the write succeeds (but not sleep). */

	int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
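
	/*
	 * Example (illustrative, not a field): emitting a crash log from a
	 * panic notifier. The helper and its arguments are hypothetical;
	 * note that panic_write may be NULL if the driver does not
	 * implement it.
	 *
	 *	static int dump_oops(struct mtd_info *mtd, loff_t to,
	 *			     const u_char *buf, size_t len)
	 *	{
	 *		size_t retlen;
	 *
	 *		if (!mtd->panic_write)
	 *			return -EOPNOTSUPP;
	 *		return mtd->panic_write(mtd, to, len, &retlen, buf);
	 *	}
	 */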

	int (*read_oob) (struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops);
	int (*write_oob) (struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops);

	/*
	 * Methods to access the protection register area, present in some
	 * flash devices. The user data is one-time programmable but the
	 * factory data is read only.
	 */
	int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
	int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
	int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
	int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
	int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
	int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len);

	/* kvec-based read/write methods.
	   NB: The 'count' parameter is the number of _vectors_, each of
	   which contains an (ofs, len) tuple.
	*/
	int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen);
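
	/*
	 * Example (illustrative, not a field): writing a header and a body
	 * in one call. 'count' is 2 here -- the number of vectors, not the
	 * number of bytes. default_mtd_writev() (declared near the bottom
	 * of this header) can be used when the driver leaves writev NULL.
	 *
	 *	struct kvec vecs[2] = {
	 *		{ .iov_base = hdr,  .iov_len = hdr_len  },
	 *		{ .iov_base = body, .iov_len = body_len },
	 *	};
	 *	size_t retlen;
	 *	int ret = mtd->writev ?
	 *		mtd->writev(mtd, vecs, 2, to, &retlen) :
	 *		default_mtd_writev(mtd, vecs, 2, to, &retlen);
	 */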

	/* Sync */
	void (*sync) (struct mtd_info *mtd);

	/* Chip-supported device locking */
	int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);

	/* Power Management functions */
	int (*suspend) (struct mtd_info *mtd);
	void (*resume) (struct mtd_info *mtd);

	/* Bad block management functions */
	int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);

	struct notifier_block reboot_notifier;  /* default mode before reboot */

	/* ECC status information */
	struct mtd_ecc_stats ecc_stats;
	/* Subpage shift (NAND) */
	int subpage_sft;

	void *priv;

	struct module *owner;
	struct device dev;
	int usecount;

	/* If the driver is something smart, like UBI, it may need to maintain
	 * its own reference counting. The functions below are only for the
	 * driver; it may register these callbacks, but they are not supposed
	 * to be called by MTD users. */
	int (*get_device) (struct mtd_info *mtd);
	void (*put_device) (struct mtd_info *mtd);
};

/*
 * Erase is an asynchronous operation. Device drivers are supposed
 * to call instr->callback() whenever the operation completes, even
 * if it completes with a failure.
 * Callers are supposed to pass a callback function and wait for it
 * to be called before writing to the block.
 */
static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	return mtd->erase(mtd, instr);
}

static inline struct mtd_info *dev_to_mtd(struct device *dev)
{
	return dev ? dev_get_drvdata(dev) : NULL;
}

static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz >> mtd->erasesize_shift;
	do_div(sz, mtd->erasesize);
	return sz;
}

static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz & mtd->erasesize_mask;
	return do_div(sz, mtd->erasesize);
}

static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz >> mtd->writesize_shift;
	do_div(sz, mtd->writesize);
	return sz;
}

static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz & mtd->writesize_mask;
	return do_div(sz, mtd->writesize);
}
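
/*
 * Example (illustrative sketch): translating a byte offset into an
 * eraseblock number plus an offset inside that block, which is what most
 * callers use these helpers for.
 *
 *	uint32_t ebnum  = mtd_div_by_eb(ofs, mtd);	 // block index
 *	uint32_t eboffs = mtd_mod_by_eb(ofs, mtd);	 // offset in block
 *	uint32_t ebcnt  = mtd_div_by_eb(mtd->size, mtd); // blocks on device
 */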

/* Kernel-side ioctl definitions */

struct mtd_partition;
struct mtd_part_parser_data;

extern int mtd_device_parse_register(struct mtd_info *mtd,
			      const char **part_probe_types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *defparts,
			      int defnr_parts);
#define mtd_device_register(master, parts, nr_parts)	\
	mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);
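
/*
 * Example (illustrative sketch): a driver registering and unregistering
 * its master device. my_flash_setup() and the platform-device glue are
 * hypothetical; "cmdlinepart" is one of the partition parsers that
 * mtd_device_parse_register() can try.
 *
 *	static const char *probes[] = { "cmdlinepart", NULL };
 *
 *	static int my_flash_probe(struct platform_device *pdev)
 *	{
 *		struct mtd_info *mtd = my_flash_setup(pdev);
 *
 *		if (IS_ERR(mtd))
 *			return PTR_ERR(mtd);
 *		platform_set_drvdata(pdev, mtd);
 *		return mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
 *	}
 *
 *	static int my_flash_remove(struct platform_device *pdev)
 *	{
 *		return mtd_device_unregister(platform_get_drvdata(pdev));
 *	}
 */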


struct mtd_notifier {
	void (*add)(struct mtd_info *mtd);
	void (*remove)(struct mtd_info *mtd);
	struct list_head list;
};


extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
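
/*
 * Example (illustrative sketch): an MTD user that wants to know when
 * devices come and go. register_mtd_user() calls ->add() for every MTD
 * device that already exists and for each one added later.
 *
 *	static void my_mtd_add(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) added\n", mtd->index, mtd->name);
 *	}
 *
 *	static void my_mtd_remove(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d removed\n", mtd->index);
 *	}
 *
 *	static struct mtd_notifier my_notifier = {
 *		.add    = my_mtd_add,
 *		.remove = my_mtd_remove,
 *	};
 *
 *	register_mtd_user(&my_notifier);
 */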

int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
		       unsigned long count, loff_t to, size_t *retlen);

int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
		      unsigned long count, loff_t from, size_t *retlen);

void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);

void mtd_erase_callback(struct erase_info *instr);

static inline int mtd_is_bitflip(int err) {
	return err == -EUCLEAN;
}

static inline int mtd_is_eccerr(int err) {
	return err == -EBADMSG;
}

static inline int mtd_is_bitflip_or_eccerr(int err) {
	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}
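
/*
 * Example (illustrative sketch): interpreting the return value of a read.
 * -EUCLEAN means the data was corrected and is usable (though the block
 * may want scrubbing); -EBADMSG means an uncorrectable ECC error.
 *
 *	ret = mtd->read(mtd, from, len, &retlen, buf);
 *	if (mtd_is_bitflip(ret))
 *		ret = 0;		// corrected, data in buf is valid
 *	else if (mtd_is_eccerr(ret))
 *		pr_err("uncorrectable ECC error at %llx\n",
 *		       (unsigned long long)from);
 */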

#endif /* __MTD_MTD_H__ */