mg_disk: fix dependency on libata
drivers/block/mg_disk.c
/*
 * drivers/block/mg_disk.c
 *
 * Support for the mGine m[g]flash IO mode.
 * Based on legacy hd.c
 *
 * (c) 2008 mGine Co.,LTD
 * (c) 2008 unsik Kim <donari75@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/ata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>

#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)

/* name for block device */
#define MG_DISK_NAME "mgd"
/* name for platform device */
#define MG_DEV_NAME "mg_disk"

#define MG_DISK_MAJ 0
#define MG_DISK_MAX_PART 16
#define MG_SECTOR_SIZE 512
#define MG_MAX_SECTS 256

/* Register offsets */
#define MG_BUFF_OFFSET 0x8000
#define MG_STORAGE_BUFFER_SIZE 0x200
#define MG_REG_OFFSET 0xC000
#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)

/* "Drive Select/Head Register" bit values */
#define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */
#define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON)
#define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON)
#define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON)


/* "Device Control Register" bit values */
#define MG_REG_CTRL_INTR_ENABLE 0x0
#define MG_REG_CTRL_INTR_DISABLE (0x1<<1)
#define MG_REG_CTRL_RESET (0x1<<2)
#define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0
#define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4)
#define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0
#define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5)
#define MG_REG_CTRL_DPD_DISABLE 0x0
#define MG_REG_CTRL_DPD_ENABLE (0x1<<6)

/* Status register bit */
/* error bit in status register */
#define MG_REG_STATUS_BIT_ERROR 0x01
/* corrected error in status register */
#define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04
/* data request bit in status register */
#define MG_REG_STATUS_BIT_DATA_REQ 0x08
/* DSC - Drive Seek Complete */
#define MG_REG_STATUS_BIT_SEEK_DONE 0x10
/* DWF - Drive Write Fault */
#define MG_REG_STATUS_BIT_WRITE_FAULT 0x20
#define MG_REG_STATUS_BIT_READY 0x40
#define MG_REG_STATUS_BIT_BUSY 0x80

/* handy status */
#define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE)
#define MG_READY_OK(s) (((s) & (MG_STAT_READY | \
                                (MG_REG_STATUS_BIT_BUSY | \
                                 MG_REG_STATUS_BIT_WRITE_FAULT | \
                                 MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY)
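/*
 * In other words, MG_READY_OK(s) above holds when READY and SEEK_DONE are
 * both set while BUSY, WRITE_FAULT and ERROR are all clear.
 */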

/* Error register */
#define MG_REG_ERR_AMNF 0x01
#define MG_REG_ERR_ABRT 0x04
#define MG_REG_ERR_IDNF 0x10
#define MG_REG_ERR_UNC 0x40
#define MG_REG_ERR_BBK 0x80

/* error code for others */
#define MG_ERR_NONE 0
#define MG_ERR_TIMEOUT 0x100
#define MG_ERR_INIT_STAT 0x101
#define MG_ERR_TRANSLATION 0x102
#define MG_ERR_CTRL_RST 0x103
#define MG_ERR_INV_STAT 0x104
#define MG_ERR_RSTOUT 0x105

#define MG_MAX_ERRORS 6 /* Max read/write errors */

/* command */
#define MG_CMD_RD 0x20
#define MG_CMD_WR 0x30
#define MG_CMD_SLEEP 0x99
#define MG_CMD_WAKEUP 0xC3
#define MG_CMD_ID 0xEC
#define MG_CMD_WR_CONF 0x3C
#define MG_CMD_RD_CONF 0x40

/* operation mode */
#define MG_OP_CASCADE (1 << 0)
#define MG_OP_CASCADE_SYNC_RD (1 << 1)
#define MG_OP_CASCADE_SYNC_WR (1 << 2)
#define MG_OP_INTERLEAVE (1 << 3)

/* synchronous */
#define MG_BURST_LAT_4 (3 << 4)
#define MG_BURST_LAT_5 (4 << 4)
#define MG_BURST_LAT_6 (5 << 4)
#define MG_BURST_LAT_7 (6 << 4)
#define MG_BURST_LAT_8 (7 << 4)
#define MG_BURST_LEN_4 (1 << 1)
#define MG_BURST_LEN_8 (2 << 1)
#define MG_BURST_LEN_16 (3 << 1)
#define MG_BURST_LEN_32 (4 << 1)
#define MG_BURST_LEN_CONT (0 << 1)

/* timeout value (unit: ms) */
#define MG_TMAX_CONF_TO_CMD 1
#define MG_TMAX_WAIT_RD_DRQ 10
#define MG_TMAX_WAIT_WR_DRQ 500
#define MG_TMAX_RST_TO_BUSY 10
#define MG_TMAX_HDRST_TO_RDY 500
#define MG_TMAX_SWRST_TO_RDY 500
#define MG_TMAX_RSTOUT 3000

/* device attributes */
/* use mflash as boot device */
#define MG_BOOT_DEV (1 << 0)
/* use mflash as storage device */
#define MG_STORAGE_DEV (1 << 1)
/* like MG_STORAGE_DEV, but the bootloader already did the reset sequence */
#define MG_STORAGE_DEV_SKIP_RST (1 << 2)

#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)

/* names of GPIO resource */
#define MG_RST_PIN "mg_rst"
/* for anything other than MG_BOOT_DEV, the reset-out pin must be assigned */
#define MG_RSTOUT_PIN "mg_rstout"

/* private driver data */
struct mg_drv_data {
        /* disk resource */
        u32 use_polling;

        /* device attributes */
        u32 dev_attr;

        /* internally used */
        struct mg_host *host;
};
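
/*
 * How a board would typically hook this driver up (illustrative sketch only;
 * the GPIO numbers, base address and IRQ below are made-up examples, not
 * values from this driver): register a platform device named MG_DEV_NAME
 * carrying this structure as platform_data, one IORESOURCE_MEM window,
 * IORESOURCE_IO entries named MG_RST_PIN (and MG_RSTOUT_PIN unless dev_attr
 * is MG_BOOT_DEV) for the GPIO lines, and an IRQ when use_polling is 0.
 * This mirrors what mg_probe() below looks up.
 *
 *	static struct mg_drv_data mg_data = {
 *		.use_polling	= 0,
 *		.dev_attr	= MG_STORAGE_DEV,
 *	};
 *
 *	static struct resource mg_res[] = {
 *		{ .start = 0x08000000, .end = 0x0800ffff,
 *		  .flags = IORESOURCE_MEM },
 *		{ .start = 32, .end = 32, .name = MG_RST_PIN,
 *		  .flags = IORESOURCE_IO },
 *		{ .start = 33, .end = 33, .name = MG_RSTOUT_PIN,
 *		  .flags = IORESOURCE_IO },
 *		{ .start = 7, .end = 7, .flags = IORESOURCE_IRQ },
 *	};
 *
 *	static struct platform_device mg_dev = {
 *		.name		= MG_DEV_NAME,
 *		.id		= -1,
 *		.dev		= { .platform_data = &mg_data },
 *		.num_resources	= ARRAY_SIZE(mg_res),
 *		.resource	= mg_res,
 *	};
 */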

/* main structure for mflash driver */
struct mg_host {
        struct device *dev;

        struct request_queue *breq;
        spinlock_t lock;
        struct gendisk *gd;

        struct timer_list timer;
        void (*mg_do_intr) (struct mg_host *);

        u16 id[ATA_ID_WORDS];

        u16 cyls;
        u16 heads;
        u16 sectors;
        u32 n_sectors;
        u32 nres_sectors;

        void __iomem *dev_base;
        unsigned int irq;
        unsigned int rst;
        unsigned int rstout;

        u32 major;
        u32 error;
};

/*
 * Debugging macro and defines
 */
#undef DO_MG_DEBUG
#ifdef DO_MG_DEBUG
# define MG_DBG(fmt, args...) \
        printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
#else /* !DO_MG_DEBUG */
# define MG_DBG(fmt, args...) do { } while (0)
#endif /* !DO_MG_DEBUG */
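
/*
 * To actually see MG_DBG() output, turn the #undef above into
 * "#define DO_MG_DEBUG" and rebuild the driver.
 */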

static void mg_request(struct request_queue *);

static void mg_dump_status(const char *msg, unsigned int stat,
                struct mg_host *host)
{
        char *name = MG_DISK_NAME;
        struct request *req;

        if (host->breq) {
                req = elv_next_request(host->breq);
                if (req)
                        name = req->rq_disk->disk_name;
        }

        printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
        if (stat & MG_REG_STATUS_BIT_BUSY)
                printk("Busy ");
        if (stat & MG_REG_STATUS_BIT_READY)
                printk("DriveReady ");
        if (stat & MG_REG_STATUS_BIT_WRITE_FAULT)
                printk("WriteFault ");
        if (stat & MG_REG_STATUS_BIT_SEEK_DONE)
                printk("SeekComplete ");
        if (stat & MG_REG_STATUS_BIT_DATA_REQ)
                printk("DataRequest ");
        if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR)
                printk("CorrectedError ");
        if (stat & MG_REG_STATUS_BIT_ERROR)
                printk("Error ");
        printk("}\n");
        if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) {
                host->error = 0;
        } else {
                host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
                printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
                                host->error & 0xff);
                if (host->error & MG_REG_ERR_BBK)
                        printk("BadSector ");
                if (host->error & MG_REG_ERR_UNC)
                        printk("UncorrectableError ");
                if (host->error & MG_REG_ERR_IDNF)
                        printk("SectorIdNotFound ");
                if (host->error & MG_REG_ERR_ABRT)
                        printk("DriveStatusError ");
                if (host->error & MG_REG_ERR_AMNF)
                        printk("AddrMarkNotFound ");
                printk("}");
                if (host->error &
                                (MG_REG_ERR_BBK | MG_REG_ERR_UNC |
                                 MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) {
                        if (host->breq) {
                                req = elv_next_request(host->breq);
                                if (req)
                                        printk(", sector=%u", (u32)req->sector);
                        }

                }
                printk("\n");
        }
}

static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
{
        u8 status;
        unsigned long expire, cur_jiffies;
        struct mg_drv_data *prv_data = host->dev->platform_data;

        host->error = MG_ERR_NONE;
        expire = jiffies + msecs_to_jiffies(msec);

        status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

        do {
                cur_jiffies = jiffies;
                if (status & MG_REG_STATUS_BIT_BUSY) {
                        if (expect == MG_REG_STATUS_BIT_BUSY)
                                break;
                } else {
                        /* Check the error condition! */
                        if (status & MG_REG_STATUS_BIT_ERROR) {
                                mg_dump_status("mg_wait", status, host);
                                break;
                        }

                        if (expect == MG_STAT_READY)
                                if (MG_READY_OK(status))
                                        break;

                        if (expect == MG_REG_STATUS_BIT_DATA_REQ)
                                if (status & MG_REG_STATUS_BIT_DATA_REQ)
                                        break;
                }
                if (!msec) {
                        mg_dump_status("not ready", status, host);
                        return MG_ERR_INV_STAT;
                }
                if (prv_data->use_polling)
                        msleep(1);

                status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
        } while (time_before(cur_jiffies, expire));

        if (time_after_eq(cur_jiffies, expire) && msec)
                host->error = MG_ERR_TIMEOUT;

        return host->error;
}

static unsigned int mg_wait_rstout(u32 rstout, u32 msec)
{
        unsigned long expire;

        expire = jiffies + msecs_to_jiffies(msec);
        while (time_before(jiffies, expire)) {
                if (gpio_get_value(rstout) == 1)
                        return MG_ERR_NONE;
                msleep(10);
        }

        return MG_ERR_RSTOUT;
}

static void mg_unexpected_intr(struct mg_host *host)
{
        u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

        mg_dump_status("mg_unexpected_intr", status, host);
}

static irqreturn_t mg_irq(int irq, void *dev_id)
{
        struct mg_host *host = dev_id;
        void (*handler)(struct mg_host *) = host->mg_do_intr;

        spin_lock(&host->lock);

        host->mg_do_intr = NULL;
        del_timer(&host->timer);
        if (!handler)
                handler = mg_unexpected_intr;
        handler(host);

        spin_unlock(&host->lock);

        return IRQ_HANDLED;
}

/* local copy of ata_id_string() */
static void mg_id_string(const u16 *id, unsigned char *s,
                unsigned int ofs, unsigned int len)
{
        unsigned int c;

        BUG_ON(len & 1);

        while (len > 0) {
                c = id[ofs] >> 8;
                *s = c;
                s++;

                c = id[ofs] & 0xff;
                *s = c;
                s++;

                ofs++;
                len -= 2;
        }
}

/* local copy of ata_id_c_string() */
static void mg_id_c_string(const u16 *id, unsigned char *s,
                unsigned int ofs, unsigned int len)
{
        unsigned char *p;

        mg_id_string(id, s, ofs, len - 1);

        p = s + strnlen(s, len - 1);
        while (p > s && p[-1] == ' ')
                p--;
        *p = '\0';
}

static int mg_get_disk_id(struct mg_host *host)
{
        u32 i;
        s32 err;
        const u16 *id = host->id;
        struct mg_drv_data *prv_data = host->dev->platform_data;
        char fwrev[ATA_ID_FW_REV_LEN + 1];
        char model[ATA_ID_PROD_LEN + 1];
        char serial[ATA_ID_SERNO_LEN + 1];

        if (!prv_data->use_polling)
                outb(MG_REG_CTRL_INTR_DISABLE,
                                (unsigned long)host->dev_base +
                                MG_REG_DRV_CTRL);

        outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
        err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ);
        if (err)
                return err;

        for (i = 0; i < (MG_SECTOR_SIZE >> 1); i++)
                host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base +
                                        MG_BUFF_OFFSET + i * 2));

        outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
        err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD);
        if (err)
                return err;

        if ((id[ATA_ID_FIELD_VALID] & 1) == 0)
                return MG_ERR_TRANSLATION;

        host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
        host->cyls = id[ATA_ID_CYLS];
        host->heads = id[ATA_ID_HEADS];
        host->sectors = id[ATA_ID_SECTORS];

        if (MG_RES_SEC && host->heads && host->sectors) {
                /* modify cyls, n_sectors */
                host->cyls = (host->n_sectors - MG_RES_SEC) /
                        host->heads / host->sectors;
                host->nres_sectors = host->n_sectors - host->cyls *
                        host->heads * host->sectors;
                host->n_sectors -= host->nres_sectors;
        }
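
        /*
         * Worked example of the adjustment above (made-up numbers, not from
         * any real device): with CONFIG_MG_DISK_RES=1, MG_RES_SEC is 2.
         * For n_sectors=1000, heads=4, sectors=16 this gives
         * cyls=(1000-2)/4/16=15, nres_sectors=1000-15*4*16=40 and a usable
         * capacity of n_sectors=960.
         */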

        mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
        mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
        mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
        printk(KERN_INFO "mg_disk: model: %s\n", model);
        printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
        printk(KERN_INFO "mg_disk: serial: %s\n", serial);
        printk(KERN_INFO "mg_disk: %d + reserved %d sectors\n",
                        host->n_sectors, host->nres_sectors);

        if (!prv_data->use_polling)
                outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
                                MG_REG_DRV_CTRL);

        return err;
}


static int mg_disk_init(struct mg_host *host)
{
        struct mg_drv_data *prv_data = host->dev->platform_data;
        s32 err;
        u8 init_status;

        /* hdd rst low */
        gpio_set_value(host->rst, 0);
        err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
        if (err)
                return err;

        /* hdd rst high */
        gpio_set_value(host->rst, 1);
        err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY);
        if (err)
                return err;

        /* soft reset on */
        outb(MG_REG_CTRL_RESET |
                        (prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
                         MG_REG_CTRL_INTR_ENABLE),
                        (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
        err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
        if (err)
                return err;

        /* soft reset off */
        outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
                        MG_REG_CTRL_INTR_ENABLE,
                        (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
        err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
        if (err)
                return err;

        init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf;

        if (init_status == 0xf)
                return MG_ERR_INIT_STAT;

        return err;
}

static void mg_bad_rw_intr(struct mg_host *host)
{
        struct request *req = elv_next_request(host->breq);
        if (req != NULL)
                if (++req->errors >= MG_MAX_ERRORS ||
                                host->error == MG_ERR_TIMEOUT)
                        __blk_end_request_cur(req, -EIO);
}

static unsigned int mg_out(struct mg_host *host,
                unsigned int sect_num,
                unsigned int sect_cnt,
                unsigned int cmd,
                void (*intr_addr)(struct mg_host *))
{
        struct mg_drv_data *prv_data = host->dev->platform_data;

        if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
                return host->error;

        if (!prv_data->use_polling) {
                host->mg_do_intr = intr_addr;
                mod_timer(&host->timer, jiffies + 3 * HZ);
        }
        if (MG_RES_SEC)
                sect_num += MG_RES_SEC;
        outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT);
        outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM);
        outb((u8)(sect_num >> 8), (unsigned long)host->dev_base +
                        MG_REG_CYL_LOW);
        outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
                        MG_REG_CYL_HIGH);
        outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE),
                        (unsigned long)host->dev_base + MG_REG_DRV_HEAD);
        outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
        return MG_ERR_NONE;
}
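
/*
 * For reference, the LBA28-style split performed by mg_out() above: with a
 * (made-up) sect_num of 0x00012345, SECT_NUM is written with 0x45, CYL_LOW
 * with 0x23, CYL_HIGH with 0x01, and DRV_HEAD with bits 27:24 (here 0x0)
 * ORed with MG_REG_HEAD_LBA_MODE.
 */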

static void mg_read(struct request *req)
{
        u32 j;
        struct mg_host *host = req->rq_disk->private_data;

        if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
                        MG_ERR_NONE)
                mg_bad_rw_intr(host);

        MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
                        req->nr_sectors, req->sector, req->buffer);

        do {
                u16 *buff = (u16 *)req->buffer;

                if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
                                MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
                        mg_bad_rw_intr(host);
                        return;
                }
                for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
                        *buff++ = inw((unsigned long)host->dev_base +
                                        MG_BUFF_OFFSET + (j << 1));

                outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
                                MG_REG_COMMAND);
        } while (__blk_end_request(req, 0, MG_SECTOR_SIZE));
}

static void mg_write(struct request *req)
{
        u32 j;
        struct mg_host *host = req->rq_disk->private_data;

        if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
                        MG_ERR_NONE) {
                mg_bad_rw_intr(host);
                return;
        }

        MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
                        req->nr_sectors, req->sector, req->buffer);

        do {
                u16 *buff = (u16 *)req->buffer;

                if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
                                MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
                        mg_bad_rw_intr(host);
                        return;
                }
                for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
                        outw(*buff++, (unsigned long)host->dev_base +
                                        MG_BUFF_OFFSET + (j << 1));

                outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
                                MG_REG_COMMAND);
        } while (__blk_end_request(req, 0, MG_SECTOR_SIZE));
}

static void mg_read_intr(struct mg_host *host)
{
        u32 i;
        u16 *buff;
        struct request *req;

        /* check status */
        do {
                i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
                if (i & MG_REG_STATUS_BIT_BUSY)
                        break;
                if (!MG_READY_OK(i))
                        break;
                if (i & MG_REG_STATUS_BIT_DATA_REQ)
                        goto ok_to_read;
        } while (0);
        mg_dump_status("mg_read_intr", i, host);
        mg_bad_rw_intr(host);
        mg_request(host->breq);
        return;

ok_to_read:
        /* get current segment of request */
        req = elv_next_request(host->breq);
        buff = (u16 *)req->buffer;

        /* read 1 sector */
        for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
                *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
                                (i << 1));

        MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
                        req->sector, req->nr_sectors - 1, req->buffer);

        /* send read confirm */
        outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

        if (__blk_end_request(req, 0, MG_SECTOR_SIZE)) {
                /* set handler if read remains */
                host->mg_do_intr = mg_read_intr;
                mod_timer(&host->timer, jiffies + 3 * HZ);
        } else /* goto next request */
                mg_request(host->breq);
}

static void mg_write_intr(struct mg_host *host)
{
        u32 i, j;
        u16 *buff;
        struct request *req;
        bool rem;

        /* get current segment of request */
        req = elv_next_request(host->breq);

        /* check status */
        do {
                i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
                if (i & MG_REG_STATUS_BIT_BUSY)
                        break;
                if (!MG_READY_OK(i))
                        break;
                if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ))
                        goto ok_to_write;
        } while (0);
        mg_dump_status("mg_write_intr", i, host);
        mg_bad_rw_intr(host);
        mg_request(host->breq);
        return;

ok_to_write:
        if ((rem = __blk_end_request(req, 0, MG_SECTOR_SIZE))) {
                /* write 1 sector and set handler if remains */
                buff = (u16 *)req->buffer;
                for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
                        outw(*buff, (unsigned long)host->dev_base +
                                        MG_BUFF_OFFSET + (j << 1));
                        buff++;
                }
                MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
                                req->sector, req->nr_sectors, req->buffer);
                host->mg_do_intr = mg_write_intr;
                mod_timer(&host->timer, jiffies + 3 * HZ);
        }

        /* send write confirm */
        outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

        if (!rem)
                mg_request(host->breq);
}

static void mg_times_out(unsigned long data)
{
        struct mg_host *host = (struct mg_host *)data;
        char *name;
        struct request *req;

        spin_lock_irq(&host->lock);

        req = elv_next_request(host->breq);
        if (!req)
                goto out_unlock;

        host->mg_do_intr = NULL;

        name = req->rq_disk->disk_name;
        printk(KERN_DEBUG "%s: timeout\n", name);

        host->error = MG_ERR_TIMEOUT;
        mg_bad_rw_intr(host);

        mg_request(host->breq);
out_unlock:
        spin_unlock_irq(&host->lock);
}

static void mg_request_poll(struct request_queue *q)
{
        struct request *req;
        struct mg_host *host;

        while ((req = elv_next_request(q)) != NULL) {
                host = req->rq_disk->private_data;
                if (blk_fs_request(req)) {
                        switch (rq_data_dir(req)) {
                        case READ:
                                mg_read(req);
                                break;
                        case WRITE:
                                mg_write(req);
                                break;
                        }
                }
        }
}

static unsigned int mg_issue_req(struct request *req,
                struct mg_host *host,
                unsigned int sect_num,
                unsigned int sect_cnt)
{
        u16 *buff;
        u32 i;

        switch (rq_data_dir(req)) {
        case READ:
                if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
                                != MG_ERR_NONE) {
                        mg_bad_rw_intr(host);
                        return host->error;
                }
                break;
        case WRITE:
                /* TODO : handler */
                outb(MG_REG_CTRL_INTR_DISABLE,
                                (unsigned long)host->dev_base +
                                MG_REG_DRV_CTRL);
                if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
                                != MG_ERR_NONE) {
                        mg_bad_rw_intr(host);
                        return host->error;
                }
                del_timer(&host->timer);
                mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ);
                outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
                                MG_REG_DRV_CTRL);
                if (host->error) {
                        mg_bad_rw_intr(host);
                        return host->error;
                }
                buff = (u16 *)req->buffer;
                for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
                        outw(*buff, (unsigned long)host->dev_base +
                                        MG_BUFF_OFFSET + (i << 1));
                        buff++;
                }
                mod_timer(&host->timer, jiffies + 3 * HZ);
                outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
                                MG_REG_COMMAND);
                break;
        }
        return MG_ERR_NONE;
}

/* This function is also called from IRQ context */
static void mg_request(struct request_queue *q)
{
        struct request *req;
        struct mg_host *host;
        u32 sect_num, sect_cnt;

        while (1) {
                req = elv_next_request(q);
                if (!req)
                        return;

                host = req->rq_disk->private_data;

                /* check unwanted request call */
                if (host->mg_do_intr)
                        return;

                del_timer(&host->timer);

                sect_num = req->sector;
                /* deal with whole segments */
                sect_cnt = req->nr_sectors;

                /* sanity check */
                if (sect_num >= get_capacity(req->rq_disk) ||
                                ((sect_num + sect_cnt) >
                                 get_capacity(req->rq_disk))) {
                        printk(KERN_WARNING
                                        "%s: bad access: sector=%d, count=%d\n",
                                        req->rq_disk->disk_name,
                                        sect_num, sect_cnt);
                        __blk_end_request_cur(req, -EIO);
                        continue;
                }

                if (!blk_fs_request(req))
                        return;

                if (!mg_issue_req(req, host, sect_num, sect_cnt))
                        return;
        }
}

static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mg_host *host = bdev->bd_disk->private_data;

        geo->cylinders = (unsigned short)host->cyls;
        geo->heads = (unsigned char)host->heads;
        geo->sectors = (unsigned char)host->sectors;
        return 0;
}

static struct block_device_operations mg_disk_ops = {
        .getgeo = mg_getgeo
};

static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
{
        struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
        struct mg_host *host = prv_data->host;

        if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
                return -EIO;

        if (!prv_data->use_polling)
                outb(MG_REG_CTRL_INTR_DISABLE,
                                (unsigned long)host->dev_base +
                                MG_REG_DRV_CTRL);

        outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
        /* wait until mflash enters deep sleep */
        msleep(1);

        if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
                if (!prv_data->use_polling)
                        outb(MG_REG_CTRL_INTR_ENABLE,
                                        (unsigned long)host->dev_base +
                                        MG_REG_DRV_CTRL);
                return -EIO;
        }

        return 0;
}

static int mg_resume(struct platform_device *plat_dev)
{
        struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
        struct mg_host *host = prv_data->host;

        if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
                return -EIO;

        outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND);
        /* wait until mflash wakes up */
        msleep(1);

        if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
                return -EIO;

        if (!prv_data->use_polling)
                outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
                                MG_REG_DRV_CTRL);

        return 0;
}

static int mg_probe(struct platform_device *plat_dev)
{
        struct mg_host *host;
        struct resource *rsc;
        struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
        int err = 0;

        if (!prv_data) {
                printk(KERN_ERR "%s:%d fail (no driver_data)\n",
                                __func__, __LINE__);
                err = -EINVAL;
                goto probe_err;
        }

        /* alloc mg_host */
        host = kzalloc(sizeof(struct mg_host), GFP_KERNEL);
        if (!host) {
                printk(KERN_ERR "%s:%d fail (no memory for mg_host)\n",
                                __func__, __LINE__);
                err = -ENOMEM;
                goto probe_err;
        }
        host->major = MG_DISK_MAJ;

        /* link each other */
        prv_data->host = host;
        host->dev = &plat_dev->dev;

        /* io remap */
        rsc = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
        if (!rsc) {
                printk(KERN_ERR "%s:%d platform_get_resource fail\n",
                                __func__, __LINE__);
                err = -EINVAL;
                goto probe_err_2;
        }
        host->dev_base = ioremap(rsc->start, resource_size(rsc));
        if (!host->dev_base) {
                printk(KERN_ERR "%s:%d ioremap fail\n",
                                __func__, __LINE__);
                err = -EIO;
                goto probe_err_2;
        }
        MG_DBG("dev_base = 0x%x\n", (u32)host->dev_base);

        /* get reset pin */
        rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
                        MG_RST_PIN);
        if (!rsc) {
                printk(KERN_ERR "%s:%d get reset pin fail\n",
                                __func__, __LINE__);
                err = -EIO;
                goto probe_err_3;
        }
        host->rst = rsc->start;

        /* init rst pin */
        err = gpio_request(host->rst, MG_RST_PIN);
        if (err)
                goto probe_err_3;
        gpio_direction_output(host->rst, 1);

        /* reset out pin */
        if (!(prv_data->dev_attr & MG_DEV_MASK))
                goto probe_err_3a;

        if (prv_data->dev_attr != MG_BOOT_DEV) {
                rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
                                MG_RSTOUT_PIN);
                if (!rsc) {
                        printk(KERN_ERR "%s:%d get reset-out pin fail\n",
                                        __func__, __LINE__);
                        err = -EIO;
                        goto probe_err_3a;
                }
                host->rstout = rsc->start;
                err = gpio_request(host->rstout, MG_RSTOUT_PIN);
                if (err)
                        goto probe_err_3a;
                gpio_direction_input(host->rstout);
        }

        /* disk reset */
        if (prv_data->dev_attr == MG_STORAGE_DEV) {
                /* If the POR sequence is not yet finished, wait */
                err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
                if (err)
                        goto probe_err_3b;
                err = mg_disk_init(host);
                if (err) {
                        printk(KERN_ERR "%s:%d fail (err code : %d)\n",
                                        __func__, __LINE__, err);
                        err = -EIO;
                        goto probe_err_3b;
                }
        }

        /* get irq resource */
        if (!prv_data->use_polling) {
                host->irq = platform_get_irq(plat_dev, 0);
                if (host->irq == -ENXIO) {
                        err = host->irq;
                        goto probe_err_3b;
                }
                err = request_irq(host->irq, mg_irq,
                                IRQF_DISABLED | IRQF_TRIGGER_RISING,
                                MG_DEV_NAME, host);
                if (err) {
                        printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
                                        __func__, __LINE__, err);
                        goto probe_err_3b;
                }

        }

        /* get disk id */
        err = mg_get_disk_id(host);
        if (err) {
                printk(KERN_ERR "%s:%d fail (err code : %d)\n",
                                __func__, __LINE__, err);
                err = -EIO;
                goto probe_err_4;
        }

        err = register_blkdev(host->major, MG_DISK_NAME);
        if (err < 0) {
                printk(KERN_ERR "%s:%d register_blkdev fail (err code : %d)\n",
                                __func__, __LINE__, err);
                goto probe_err_4;
        }
        if (!host->major)
                host->major = err;

        spin_lock_init(&host->lock);

        if (prv_data->use_polling)
                host->breq = blk_init_queue(mg_request_poll, &host->lock);
        else
                host->breq = blk_init_queue(mg_request, &host->lock);

        if (!host->breq) {
                err = -ENOMEM;
                printk(KERN_ERR "%s:%d (blk_init_queue) fail\n",
                                __func__, __LINE__);
                goto probe_err_5;
        }

        /* mflash is a random-access device, so the noop elevator is enough */
        elevator_exit(host->breq->elevator);
        err = elevator_init(host->breq, "noop");
        if (err) {
                printk(KERN_ERR "%s:%d (elevator_init) fail\n",
                                __func__, __LINE__);
                goto probe_err_6;
        }
        blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
        blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE);

        init_timer(&host->timer);
        host->timer.function = mg_times_out;
        host->timer.data = (unsigned long)host;

        host->gd = alloc_disk(MG_DISK_MAX_PART);
        if (!host->gd) {
                printk(KERN_ERR "%s:%d (alloc_disk) fail\n",
                                __func__, __LINE__);
                err = -ENOMEM;
                goto probe_err_7;
        }
        host->gd->major = host->major;
        host->gd->first_minor = 0;
        host->gd->fops = &mg_disk_ops;
        host->gd->queue = host->breq;
        host->gd->private_data = host;
        sprintf(host->gd->disk_name, MG_DISK_NAME"a");

        set_capacity(host->gd, host->n_sectors);

        add_disk(host->gd);

        return err;

probe_err_7:
        del_timer_sync(&host->timer);
probe_err_6:
        blk_cleanup_queue(host->breq);
probe_err_5:
        unregister_blkdev(host->major, MG_DISK_NAME);
probe_err_4:
        if (!prv_data->use_polling)
                free_irq(host->irq, host);
probe_err_3b:
        gpio_free(host->rstout);
probe_err_3a:
        gpio_free(host->rst);
probe_err_3:
        iounmap(host->dev_base);
probe_err_2:
        kfree(host);
probe_err:
        return err;
}

static int mg_remove(struct platform_device *plat_dev)
{
        struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
        struct mg_host *host = prv_data->host;
        int err = 0;

        /* delete timer */
        del_timer_sync(&host->timer);

        /* remove disk */
        if (host->gd) {
                del_gendisk(host->gd);
                put_disk(host->gd);
        }
        /* remove queue */
        if (host->breq)
                blk_cleanup_queue(host->breq);

        /* unregister blk device */
        unregister_blkdev(host->major, MG_DISK_NAME);

        /* free irq */
        if (!prv_data->use_polling)
                free_irq(host->irq, host);

        /* free reset-out pin */
        if (prv_data->dev_attr != MG_BOOT_DEV)
                gpio_free(host->rstout);

        /* free rst pin */
        if (host->rst)
                gpio_free(host->rst);

        /* unmap io */
        if (host->dev_base)
                iounmap(host->dev_base);

        /* free mg_host */
        kfree(host);

        return err;
}

static struct platform_driver mg_disk_driver = {
        .probe = mg_probe,
        .remove = mg_remove,
        .suspend = mg_suspend,
        .resume = mg_resume,
        .driver = {
                .name = MG_DEV_NAME,
                .owner = THIS_MODULE,
        }
};

/****************************************************************************
 *
 * Module stuff
 *
 ****************************************************************************/

static int __init mg_init(void)
{
        printk(KERN_INFO "mGine mflash driver, (c) 2008 mGine Co.\n");
        return platform_driver_register(&mg_disk_driver);
}

static void __exit mg_exit(void)
{
        printk(KERN_INFO "mflash driver : bye bye\n");
        platform_driver_unregister(&mg_disk_driver);
}

module_init(mg_init);
module_exit(mg_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("unsik Kim <donari75@gmail.com>");
MODULE_DESCRIPTION("mGine m[g]flash device driver");