/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>

#include "net_driver.h"
#include "spi.h"
#include "efx.h"
#include "nic.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

#define EFX_SPI_VERIFY_BUF_LEN 16

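/* A single MTD partition exported by the driver.  The embedded mtd_info is
 * what gets registered with the MTD core; Falcon partitions record an offset
 * into the SPI flash, while Siena partitions record MCDI NVRAM state.
 */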
struct efx_mtd_partition {
        struct mtd_info mtd;
        union {
                struct {
                        bool updating;
                        u8 nvram_type;
                        u16 fw_subtype;
                } mcdi;
                size_t offset;
        };
        const char *type_name;
        char name[IFNAMSIZ + 20];
};

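/* Per-NIC-generation MTD operations: Falcon accesses the SPI flash directly,
 * Siena goes through MCDI NVRAM requests.
 */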
struct efx_mtd_ops {
        int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
                    size_t *retlen, u8 *buffer);
        int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
        int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
                     size_t *retlen, const u8 *buffer);
        int (*sync)(struct mtd_info *mtd);
};

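/* An MTD device with its partitions allocated inline; devices are linked
 * into efx_nic::mtd_list.
 */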
struct efx_mtd {
        struct list_head node;
        struct efx_nic *efx;
        const struct efx_spi_device *spi;
        const char *name;
        const struct efx_mtd_ops *ops;
        size_t n_parts;
        struct efx_mtd_partition part[0];
};

#define efx_for_each_partition(part, efx_mtd)                   \
        for ((part) = &(efx_mtd)->part[0];                      \
             (part) != &(efx_mtd)->part[(efx_mtd)->n_parts];    \
             (part)++)

#define to_efx_mtd_partition(mtd)                               \
        container_of(mtd, struct efx_mtd_partition, mtd)

static int falcon_mtd_probe(struct efx_nic *efx);
static int siena_mtd_probe(struct efx_nic *efx);

/* SPI utilities */

static int
efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
{
        struct efx_mtd *efx_mtd = part->mtd.priv;
        const struct efx_spi_device *spi = efx_mtd->spi;
        struct efx_nic *efx = efx_mtd->efx;
        u8 status;
        int rc, i;

        /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
        for (i = 0; i < 40; i++) {
                __set_current_state(uninterruptible ?
                                    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
                schedule_timeout(HZ / 10);
                rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
                                    &status, sizeof(status));
                if (rc)
                        return rc;
                if (!(status & SPI_STATUS_NRDY))
                        return 0;
                if (signal_pending(current))
                        return -EINTR;
        }
        pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
        return -ETIMEDOUT;
}

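/* Clear the block-protection bits (BP0-BP2) in the SPI status register so
 * that the device can be erased and written.
 */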
static int
efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
{
        const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
                                SPI_STATUS_BP0);
        u8 status;
        int rc;

        rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
                            &status, sizeof(status));
        if (rc)
                return rc;

        if (!(status & unlock_mask))
                return 0; /* already unlocked */

        rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
        if (rc)
                return rc;
        rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
        if (rc)
                return rc;

        status &= ~unlock_mask;
        rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
                            NULL, sizeof(status));
        if (rc)
                return rc;
        rc = falcon_spi_wait_write(efx, spi);
        if (rc)
                return rc;

        return 0;
}

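/* Erase one sector of SPI flash (len must equal the erase size), wait for
 * completion and read the sector back to verify that it is now blank.
 */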
static int
efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
{
        struct efx_mtd *efx_mtd = part->mtd.priv;
        const struct efx_spi_device *spi = efx_mtd->spi;
        struct efx_nic *efx = efx_mtd->efx;
        unsigned pos, block_len;
        u8 empty[EFX_SPI_VERIFY_BUF_LEN];
        u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
        int rc;

        if (len != spi->erase_size)
                return -EINVAL;

        if (spi->erase_command == 0)
                return -EOPNOTSUPP;

        rc = efx_spi_unlock(efx, spi);
        if (rc)
                return rc;
        rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
        if (rc)
                return rc;
        rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
                            NULL, 0);
        if (rc)
                return rc;
        rc = efx_spi_slow_wait(part, false);

        /* Verify the entire region has been wiped */
        memset(empty, 0xff, sizeof(empty));
        for (pos = 0; pos < len; pos += block_len) {
                block_len = min(len - pos, sizeof(buffer));
                rc = falcon_spi_read(efx, spi, start + pos, block_len,
                                     NULL, buffer);
                if (rc)
                        return rc;
                if (memcmp(empty, buffer, block_len))
                        return -EIO;

                /* Avoid locking up the system */
                cond_resched();
                if (signal_pending(current))
                        return -EINTR;
        }

        return rc;
}

/* MTD interface */

static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
        struct efx_mtd *efx_mtd = mtd->priv;
        int rc;

        rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
        if (rc == 0) {
                erase->state = MTD_ERASE_DONE;
        } else {
                erase->state = MTD_ERASE_FAILED;
                erase->fail_addr = 0xffffffff;
        }
        mtd_erase_callback(erase);
        return rc;
}

static void efx_mtd_sync(struct mtd_info *mtd)
{
        struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
        struct efx_mtd *efx_mtd = mtd->priv;
        int rc;

        rc = efx_mtd->ops->sync(mtd);
        if (rc)
                pr_err("%s: %s sync failed (%d)\n",
                       part->name, efx_mtd->name, rc);
}

static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
{
        int rc;

        for (;;) {
                rc = del_mtd_device(&part->mtd);
                if (rc != -EBUSY)
                        break;
                ssleep(1);
        }
        WARN_ON(rc);
}

static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
{
        struct efx_mtd_partition *part;

        efx_for_each_partition(part, efx_mtd)
                efx_mtd_remove_partition(part);
        list_del(&efx_mtd->node);
        kfree(efx_mtd);
}

static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
{
        struct efx_mtd_partition *part;

        efx_for_each_partition(part, efx_mtd)
                if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
                        snprintf(part->name, sizeof(part->name),
                                 "%s %s:%02x", efx_mtd->efx->name,
                                 part->type_name, part->mcdi.fw_subtype);
                else
                        snprintf(part->name, sizeof(part->name),
                                 "%s %s", efx_mtd->efx->name,
                                 part->type_name);
}

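/* Fill in the common mtd_info fields for every partition and register them
 * with the MTD core, unwinding any registrations already made on failure.
 */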
static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
{
        struct efx_mtd_partition *part;

        efx_mtd->efx = efx;

        efx_mtd_rename_device(efx_mtd);

        efx_for_each_partition(part, efx_mtd) {
                part->mtd.writesize = 1;

                part->mtd.owner = THIS_MODULE;
                part->mtd.priv = efx_mtd;
                part->mtd.name = part->name;
                part->mtd.erase = efx_mtd_erase;
                part->mtd.read = efx_mtd->ops->read;
                part->mtd.write = efx_mtd->ops->write;
                part->mtd.sync = efx_mtd_sync;

                if (add_mtd_device(&part->mtd))
                        goto fail;
        }

        list_add(&efx_mtd->node, &efx->mtd_list);
        return 0;

fail:
        while (part != &efx_mtd->part[0]) {
                --part;
                efx_mtd_remove_partition(part);
        }
        /* add_mtd_device() returns 1 if the MTD table is full */
        return -ENOMEM;
}

void efx_mtd_remove(struct efx_nic *efx)
{
        struct efx_mtd *efx_mtd, *next;

        WARN_ON(efx_dev_registered(efx));

        list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
                efx_mtd_remove_device(efx_mtd);
}

void efx_mtd_rename(struct efx_nic *efx)
{
        struct efx_mtd *efx_mtd;

        ASSERT_RTNL();

        list_for_each_entry(efx_mtd, &efx->mtd_list, node)
                efx_mtd_rename_device(efx_mtd);
}

int efx_mtd_probe(struct efx_nic *efx)
{
        if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
                return siena_mtd_probe(efx);
        else
                return falcon_mtd_probe(efx);
}

/* Implementation of MTD operations for Falcon */

static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
                           size_t len, size_t *retlen, u8 *buffer)
{
        struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
        struct efx_mtd *efx_mtd = mtd->priv;
        const struct efx_spi_device *spi = efx_mtd->spi;
        struct efx_nic *efx = efx_mtd->efx;
        int rc;

        rc = mutex_lock_interruptible(&efx->spi_lock);
        if (rc)
                return rc;
        rc = falcon_spi_read(efx, spi, part->offset + start, len,
                             retlen, buffer);
        mutex_unlock(&efx->spi_lock);
        return rc;
}

static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
        struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
        struct efx_mtd *efx_mtd = mtd->priv;
        struct efx_nic *efx = efx_mtd->efx;
        int rc;

        rc = mutex_lock_interruptible(&efx->spi_lock);
        if (rc)
                return rc;
        rc = efx_spi_erase(part, part->offset + start, len);
        mutex_unlock(&efx->spi_lock);
        return rc;
}

static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
                            size_t len, size_t *retlen, const u8 *buffer)
{
        struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
        struct efx_mtd *efx_mtd = mtd->priv;
        const struct efx_spi_device *spi = efx_mtd->spi;
        struct efx_nic *efx = efx_mtd->efx;
        int rc;

        rc = mutex_lock_interruptible(&efx->spi_lock);
        if (rc)
                return rc;
        rc = falcon_spi_write(efx, spi, part->offset + start, len,
                              retlen, buffer);
        mutex_unlock(&efx->spi_lock);
        return rc;
}

static int falcon_mtd_sync(struct mtd_info *mtd)
{
        struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
        struct efx_mtd *efx_mtd = mtd->priv;
        struct efx_nic *efx = efx_mtd->efx;
        int rc;

        mutex_lock(&efx->spi_lock);
        rc = efx_spi_slow_wait(part, true);
        mutex_unlock(&efx->spi_lock);
        return rc;
}

static struct efx_mtd_ops falcon_mtd_ops = {
        .read = falcon_mtd_read,
        .erase = falcon_mtd_erase,
        .write = falcon_mtd_write,
        .sync = falcon_mtd_sync,
};

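/* On Falcon, expose the boot ROM region of the SPI flash (everything from
 * FALCON_FLASH_BOOTCODE_START onwards) as a single partition.
 */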
static int falcon_mtd_probe(struct efx_nic *efx)
{
        struct efx_spi_device *spi = efx->spi_flash;
        struct efx_mtd *efx_mtd;
        int rc;

        ASSERT_RTNL();

        if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START)
                return -ENODEV;

        efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
                          GFP_KERNEL);
        if (!efx_mtd)
                return -ENOMEM;

        efx_mtd->spi = spi;
        efx_mtd->name = "flash";
        efx_mtd->ops = &falcon_mtd_ops;

        efx_mtd->n_parts = 1;
        efx_mtd->part[0].mtd.type = MTD_NORFLASH;
        efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
        efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
        efx_mtd->part[0].mtd.erasesize = spi->erase_size;
        efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
        efx_mtd->part[0].type_name = "sfc_flash_bootrom";

        rc = efx_mtd_probe_device(efx, efx_mtd);
        if (rc)
                kfree(efx_mtd);
        return rc;
}

/* Implementation of MTD operations for Siena */

static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
                          size_t len, size_t *retlen, u8 *buffer)
{
        struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
        struct efx_mtd *efx_mtd = mtd->priv;
        struct efx_nic *efx = efx_mtd->efx;
        loff_t offset = start;
        loff_t end = min_t(loff_t, start + len, mtd->size);
        size_t chunk;
        int rc = 0;

        while (offset < end) {
                chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
                rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
                                         buffer, chunk);
                if (rc)
                        goto out;
                offset += chunk;
                buffer += chunk;
        }
out:
        *retlen = offset - start;
        return rc;
}

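/* Erase a range through MCDI.  An NVRAM update transaction is opened on
 * first use and closed again by siena_mtd_sync().
 */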
static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
        struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
        struct efx_mtd *efx_mtd = mtd->priv;
        struct efx_nic *efx = efx_mtd->efx;
        loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
        loff_t end = min_t(loff_t, start + len, mtd->size);
        size_t chunk = part->mtd.erasesize;
        int rc = 0;

        if (!part->mcdi.updating) {
                rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
                if (rc)
                        goto out;
                part->mcdi.updating = 1;
        }

        /* The MCDI interface can in fact do multiple erase blocks at once;
         * but erasing may be slow, so we make multiple calls here to avoid
         * tripping the MCDI RPC timeout. */
        while (offset < end) {
                rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
                                          chunk);
                if (rc)
                        goto out;
                offset += chunk;
        }
out:
        return rc;
}

static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
                           size_t len, size_t *retlen, const u8 *buffer)
{
        struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
        struct efx_mtd *efx_mtd = mtd->priv;
        struct efx_nic *efx = efx_mtd->efx;
        loff_t offset = start;
        loff_t end = min_t(loff_t, start + len, mtd->size);
        size_t chunk;
        int rc = 0;

        if (!part->mcdi.updating) {
                rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
                if (rc)
                        goto out;
                part->mcdi.updating = 1;
        }

        while (offset < end) {
                chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
                rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
                                          buffer, chunk);
                if (rc)
                        goto out;
                offset += chunk;
                buffer += chunk;
        }
out:
        *retlen = offset - start;
        return rc;
}

static int siena_mtd_sync(struct mtd_info *mtd)
{
        struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
        struct efx_mtd *efx_mtd = mtd->priv;
        struct efx_nic *efx = efx_mtd->efx;
        int rc = 0;

        if (part->mcdi.updating) {
                part->mcdi.updating = 0;
                rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
        }

        return rc;
}

static struct efx_mtd_ops siena_mtd_ops = {
        .read = siena_mtd_read,
        .erase = siena_mtd_erase,
        .write = siena_mtd_write,
        .sync = siena_mtd_sync,
};

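/* Mapping from MCDI NVRAM partition type to the port it belongs to and the
 * name under which it is exposed.
 */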
struct siena_nvram_type_info {
        int port;
        const char *name;
};

static struct siena_nvram_type_info siena_nvram_types[] = {
        [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
        [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
        [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
        [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
        [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
        [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
        [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
        [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
        [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
        [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
        [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
        [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
};

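/* Initialise one partition from an MCDI NVRAM type.  Types that belong to
 * the other port or are write-protected return -ENODEV so the caller skips
 * them.
 */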
static int siena_mtd_probe_partition(struct efx_nic *efx,
                                     struct efx_mtd *efx_mtd,
                                     unsigned int part_id,
                                     unsigned int type)
{
        struct efx_mtd_partition *part = &efx_mtd->part[part_id];
        struct siena_nvram_type_info *info;
        size_t size, erase_size;
        bool protected;
        int rc;

        if (type >= ARRAY_SIZE(siena_nvram_types))
                return -ENODEV;

        info = &siena_nvram_types[type];

        if (info->port != efx_port_num(efx))
                return -ENODEV;

        rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
        if (rc)
                return rc;
        if (protected)
                return -ENODEV; /* hide it */

        part->mcdi.nvram_type = type;
        part->type_name = info->name;

        part->mtd.type = MTD_NORFLASH;
        part->mtd.flags = MTD_CAP_NORFLASH;
        part->mtd.size = size;
        part->mtd.erasesize = erase_size;

        return 0;
}

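/* Read the board configuration over MCDI and record the firmware subtype of
 * each partition, which is included in the partition name.
 */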
static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
                                     struct efx_mtd *efx_mtd)
{
        struct efx_mtd_partition *part;
        uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN /
                                 sizeof(uint16_t)];
        int rc;

        rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list);
        if (rc)
                return rc;

        efx_for_each_partition(part, efx_mtd)
                part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];

        return 0;
}

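/* Enumerate the NVRAM types reported by the MC, set up one partition per
 * usable type and register the resulting MTD device.
 */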
static int siena_mtd_probe(struct efx_nic *efx)
{
        struct efx_mtd *efx_mtd;
        int rc = -ENODEV;
        u32 nvram_types;
        unsigned int type;

        ASSERT_RTNL();

        rc = efx_mcdi_nvram_types(efx, &nvram_types);
        if (rc)
                return rc;

        efx_mtd = kzalloc(sizeof(*efx_mtd) +
                          hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
                          GFP_KERNEL);
        if (!efx_mtd)
                return -ENOMEM;

        efx_mtd->name = "Siena NVRAM manager";

        efx_mtd->ops = &siena_mtd_ops;

        type = 0;
        efx_mtd->n_parts = 0;

        while (nvram_types != 0) {
                if (nvram_types & 1) {
                        rc = siena_mtd_probe_partition(efx, efx_mtd,
                                                       efx_mtd->n_parts, type);
                        if (rc == 0)
                                efx_mtd->n_parts++;
                        else if (rc != -ENODEV)
                                goto fail;
                }
                type++;
                nvram_types >>= 1;
        }

        rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
        if (rc)
                goto fail;

        rc = efx_mtd_probe_device(efx, efx_mtd);
fail:
        if (rc)
                kfree(efx_mtd);
        return rc;
}