ahci: Add Marvell 6121 SATA support
drivers/ata/ahci.c
1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48
49 #define DRV_NAME "ahci"
50 #define DRV_VERSION "3.0"
51
52 static int ahci_skip_host_reset;
53 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
54 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
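/*
 * Usage note (illustrative): this parameter can be set at module load
 * time, e.g. "modprobe ahci skip_host_reset=1", or on the kernel
 * command line as "ahci.skip_host_reset=1" when the driver is built in.
 */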
55
56 static int ahci_enable_alpm(struct ata_port *ap,
57 enum link_pm policy);
58 static void ahci_disable_alpm(struct ata_port *ap);
59
60 enum {
61 AHCI_PCI_BAR = 5,
62 AHCI_MAX_PORTS = 32,
63 AHCI_MAX_SG = 168, /* hardware max is 64K */
64 AHCI_DMA_BOUNDARY = 0xffffffff,
65 AHCI_USE_CLUSTERING = 1,
66 AHCI_MAX_CMDS = 32,
67 AHCI_CMD_SZ = 32,
68 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
69 AHCI_RX_FIS_SZ = 256,
70 AHCI_CMD_TBL_CDB = 0x40,
71 AHCI_CMD_TBL_HDR_SZ = 0x80,
72 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
73 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
74 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
75 AHCI_RX_FIS_SZ,
76 AHCI_IRQ_ON_SG = (1 << 31),
77 AHCI_CMD_ATAPI = (1 << 5),
78 AHCI_CMD_WRITE = (1 << 6),
79 AHCI_CMD_PREFETCH = (1 << 7),
80 AHCI_CMD_RESET = (1 << 8),
81 AHCI_CMD_CLR_BUSY = (1 << 10),
82
83 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
84 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
85 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
86
87 board_ahci = 0,
88 board_ahci_vt8251 = 1,
89 board_ahci_ign_iferr = 2,
90 board_ahci_sb600 = 3,
91 board_ahci_mv = 4,
92 board_ahci_sb700 = 5,
93
94 /* global controller registers */
95 HOST_CAP = 0x00, /* host capabilities */
96 HOST_CTL = 0x04, /* global host control */
97 HOST_IRQ_STAT = 0x08, /* interrupt status */
98 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
99 HOST_VERSION = 0x10, /* AHCI spec. version compliance */
100
101 /* HOST_CTL bits */
102 HOST_RESET = (1 << 0), /* reset controller; self-clear */
103 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
104 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
105
106 /* HOST_CAP bits */
107 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
108 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
109 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
110 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
111 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
112 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
113 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
114 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
115
116 /* registers for each SATA port */
117 PORT_LST_ADDR = 0x00, /* command list DMA addr */
118 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
119 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
120 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
121 PORT_IRQ_STAT = 0x10, /* interrupt status */
122 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
123 PORT_CMD = 0x18, /* port command */
124 PORT_TFDATA = 0x20, /* taskfile data */
125 PORT_SIG = 0x24, /* device TF signature */
126 PORT_CMD_ISSUE = 0x38, /* command issue */
127 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
128 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
129 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
130 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
131 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
132
133 /* PORT_IRQ_{STAT,MASK} bits */
134 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
135 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
136 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
137 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
138 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
139 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
140 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
141 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
142
143 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
144 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
145 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
146 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
147 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
148 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
149 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
150 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
151 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
152
153 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
154 PORT_IRQ_IF_ERR |
155 PORT_IRQ_CONNECT |
156 PORT_IRQ_PHYRDY |
157 PORT_IRQ_UNK_FIS |
158 PORT_IRQ_BAD_PMP,
159 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
160 PORT_IRQ_TF_ERR |
161 PORT_IRQ_HBUS_DATA_ERR,
162 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
163 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
164 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
165
166 /* PORT_CMD bits */
167 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
168 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
169 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
170 PORT_CMD_PMP = (1 << 17), /* PMP attached */
171 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
172 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
173 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
174 PORT_CMD_CLO = (1 << 3), /* Command list override */
175 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
176 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
177 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
178
179 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
180 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
181 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
182 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
183
184 /* hpriv->flags bits */
185 AHCI_HFLAG_NO_NCQ = (1 << 0),
186 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
187 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
188 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
189 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
190 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
191 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
192 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
193 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
194
195 /* ap->flags bits */
196
197 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
198 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
199 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
200 ATA_FLAG_IPM,
201 AHCI_LFLAG_COMMON = ATA_LFLAG_SKIP_D2H_BSY,
202
203 ICH_MAP = 0x90, /* ICH MAP register */
204 };
205
206 struct ahci_cmd_hdr {
207 __le32 opts;
208 __le32 status;
209 __le32 tbl_addr;
210 __le32 tbl_addr_hi;
211 __le32 reserved[4];
212 };
213
214 struct ahci_sg {
215 __le32 addr;
216 __le32 addr_hi;
217 __le32 reserved;
218 __le32 flags_size;
219 };
220
221 struct ahci_host_priv {
222 unsigned int flags; /* AHCI_HFLAG_* */
223 u32 cap; /* cap to use */
224 u32 port_map; /* port map to use */
225 u32 saved_cap; /* saved initial cap */
226 u32 saved_port_map; /* saved initial port_map */
227 };
228
229 struct ahci_port_priv {
230 struct ata_link *active_link;
231 struct ahci_cmd_hdr *cmd_slot;
232 dma_addr_t cmd_slot_dma;
233 void *cmd_tbl;
234 dma_addr_t cmd_tbl_dma;
235 void *rx_fis;
236 dma_addr_t rx_fis_dma;
237 /* for NCQ spurious interrupt analysis */
238 unsigned int ncq_saw_d2h:1;
239 unsigned int ncq_saw_dmas:1;
240 unsigned int ncq_saw_sdb:1;
241 u32 intr_mask; /* interrupts to enable */
242 };
243
244 static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
245 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
246 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
247 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
248 static void ahci_irq_clear(struct ata_port *ap);
249 static int ahci_port_start(struct ata_port *ap);
250 static void ahci_port_stop(struct ata_port *ap);
251 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
252 static void ahci_qc_prep(struct ata_queued_cmd *qc);
253 static u8 ahci_check_status(struct ata_port *ap);
254 static void ahci_freeze(struct ata_port *ap);
255 static void ahci_thaw(struct ata_port *ap);
256 static void ahci_pmp_attach(struct ata_port *ap);
257 static void ahci_pmp_detach(struct ata_port *ap);
258 static void ahci_error_handler(struct ata_port *ap);
259 static void ahci_vt8251_error_handler(struct ata_port *ap);
260 static void ahci_p5wdh_error_handler(struct ata_port *ap);
261 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
262 static int ahci_port_resume(struct ata_port *ap);
263 static void ahci_dev_config(struct ata_device *dev);
264 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
265 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
266 u32 opts);
267 #ifdef CONFIG_PM
268 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
269 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
270 static int ahci_pci_device_resume(struct pci_dev *pdev);
271 #endif
272
273 static struct class_device_attribute *ahci_shost_attrs[] = {
274 &class_device_attr_link_power_management_policy,
275 NULL
276 };
277
278 static struct scsi_host_template ahci_sht = {
279 .module = THIS_MODULE,
280 .name = DRV_NAME,
281 .ioctl = ata_scsi_ioctl,
282 .queuecommand = ata_scsi_queuecmd,
283 .change_queue_depth = ata_scsi_change_queue_depth,
284 .can_queue = AHCI_MAX_CMDS - 1,
285 .this_id = ATA_SHT_THIS_ID,
286 .sg_tablesize = AHCI_MAX_SG,
287 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
288 .emulated = ATA_SHT_EMULATED,
289 .use_clustering = AHCI_USE_CLUSTERING,
290 .proc_name = DRV_NAME,
291 .dma_boundary = AHCI_DMA_BOUNDARY,
292 .slave_configure = ata_scsi_slave_config,
293 .slave_destroy = ata_scsi_slave_destroy,
294 .bios_param = ata_std_bios_param,
295 .shost_attrs = ahci_shost_attrs,
296 };
297
298 static const struct ata_port_operations ahci_ops = {
299 .check_status = ahci_check_status,
300 .check_altstatus = ahci_check_status,
301 .dev_select = ata_noop_dev_select,
302
303 .dev_config = ahci_dev_config,
304
305 .tf_read = ahci_tf_read,
306
307 .qc_defer = sata_pmp_qc_defer_cmd_switch,
308 .qc_prep = ahci_qc_prep,
309 .qc_issue = ahci_qc_issue,
310
311 .irq_clear = ahci_irq_clear,
312
313 .scr_read = ahci_scr_read,
314 .scr_write = ahci_scr_write,
315
316 .freeze = ahci_freeze,
317 .thaw = ahci_thaw,
318
319 .error_handler = ahci_error_handler,
320 .post_internal_cmd = ahci_post_internal_cmd,
321
322 .pmp_attach = ahci_pmp_attach,
323 .pmp_detach = ahci_pmp_detach,
324
325 #ifdef CONFIG_PM
326 .port_suspend = ahci_port_suspend,
327 .port_resume = ahci_port_resume,
328 #endif
329 .enable_pm = ahci_enable_alpm,
330 .disable_pm = ahci_disable_alpm,
331
332 .port_start = ahci_port_start,
333 .port_stop = ahci_port_stop,
334 };
335
336 static const struct ata_port_operations ahci_vt8251_ops = {
337 .check_status = ahci_check_status,
338 .check_altstatus = ahci_check_status,
339 .dev_select = ata_noop_dev_select,
340
341 .tf_read = ahci_tf_read,
342
343 .qc_defer = sata_pmp_qc_defer_cmd_switch,
344 .qc_prep = ahci_qc_prep,
345 .qc_issue = ahci_qc_issue,
346
347 .irq_clear = ahci_irq_clear,
348
349 .scr_read = ahci_scr_read,
350 .scr_write = ahci_scr_write,
351
352 .freeze = ahci_freeze,
353 .thaw = ahci_thaw,
354
355 .error_handler = ahci_vt8251_error_handler,
356 .post_internal_cmd = ahci_post_internal_cmd,
357
358 .pmp_attach = ahci_pmp_attach,
359 .pmp_detach = ahci_pmp_detach,
360
361 #ifdef CONFIG_PM
362 .port_suspend = ahci_port_suspend,
363 .port_resume = ahci_port_resume,
364 #endif
365
366 .port_start = ahci_port_start,
367 .port_stop = ahci_port_stop,
368 };
369
370 static const struct ata_port_operations ahci_p5wdh_ops = {
371 .check_status = ahci_check_status,
372 .check_altstatus = ahci_check_status,
373 .dev_select = ata_noop_dev_select,
374
375 .tf_read = ahci_tf_read,
376
377 .qc_defer = sata_pmp_qc_defer_cmd_switch,
378 .qc_prep = ahci_qc_prep,
379 .qc_issue = ahci_qc_issue,
380
381 .irq_clear = ahci_irq_clear,
382
383 .scr_read = ahci_scr_read,
384 .scr_write = ahci_scr_write,
385
386 .freeze = ahci_freeze,
387 .thaw = ahci_thaw,
388
389 .error_handler = ahci_p5wdh_error_handler,
390 .post_internal_cmd = ahci_post_internal_cmd,
391
392 .pmp_attach = ahci_pmp_attach,
393 .pmp_detach = ahci_pmp_detach,
394
395 #ifdef CONFIG_PM
396 .port_suspend = ahci_port_suspend,
397 .port_resume = ahci_port_resume,
398 #endif
399
400 .port_start = ahci_port_start,
401 .port_stop = ahci_port_stop,
402 };
403
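/*
 * Note: AHCI_HFLAGS() stuffs board-specific AHCI_HFLAG_* bits into the
 * ata_port_info.private_data slot of the table below; the probe code is
 * expected to copy them into ahci_host_priv->flags (not visible here).
 */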
404 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
405
406 static const struct ata_port_info ahci_port_info[] = {
407 /* board_ahci */
408 {
409 .flags = AHCI_FLAG_COMMON,
410 .link_flags = AHCI_LFLAG_COMMON,
411 .pio_mask = 0x1f, /* pio0-4 */
412 .udma_mask = ATA_UDMA6,
413 .port_ops = &ahci_ops,
414 },
415 /* board_ahci_vt8251 */
416 {
417 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
418 .flags = AHCI_FLAG_COMMON,
419 .link_flags = AHCI_LFLAG_COMMON | ATA_LFLAG_HRST_TO_RESUME,
420 .pio_mask = 0x1f, /* pio0-4 */
421 .udma_mask = ATA_UDMA6,
422 .port_ops = &ahci_vt8251_ops,
423 },
424 /* board_ahci_ign_iferr */
425 {
426 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
427 .flags = AHCI_FLAG_COMMON,
428 .link_flags = AHCI_LFLAG_COMMON,
429 .pio_mask = 0x1f, /* pio0-4 */
430 .udma_mask = ATA_UDMA6,
431 .port_ops = &ahci_ops,
432 },
433 /* board_ahci_sb600 */
434 {
435 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
436 AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP),
437 .flags = AHCI_FLAG_COMMON,
438 .link_flags = AHCI_LFLAG_COMMON,
439 .pio_mask = 0x1f, /* pio0-4 */
440 .udma_mask = ATA_UDMA6,
441 .port_ops = &ahci_ops,
442 },
443 /* board_ahci_mv */
444 {
445 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
446 AHCI_HFLAG_MV_PATA),
447 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
448 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
449 .link_flags = AHCI_LFLAG_COMMON,
450 .pio_mask = 0x1f, /* pio0-4 */
451 .udma_mask = ATA_UDMA6,
452 .port_ops = &ahci_ops,
453 },
454 /* board_ahci_sb700 */
455 {
456 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
457 AHCI_HFLAG_NO_PMP),
458 .flags = AHCI_FLAG_COMMON,
459 .link_flags = AHCI_LFLAG_COMMON,
460 .pio_mask = 0x1f, /* pio0-4 */
461 .udma_mask = ATA_UDMA6,
462 .port_ops = &ahci_ops,
463 },
464 };
465
466 static const struct pci_device_id ahci_pci_tbl[] = {
467 /* Intel */
468 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
469 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
470 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
471 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
472 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
473 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
474 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
475 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
476 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
477 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
478 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
479 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
480 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
481 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
482 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
483 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
484 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
485 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
486 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
487 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
488 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
489 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
490 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
491 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
492 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
493 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
494 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
495 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
496 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
497 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
498 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
499
500 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
501 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
502 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
503
504 /* ATI */
505 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
506 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
507 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
508 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
509 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
510 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
511 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
512
513 /* VIA */
514 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
515 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
516
517 /* NVIDIA */
518 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */
519 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */
520 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */
521 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */
522 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci }, /* MCP65 */
523 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci }, /* MCP65 */
524 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci }, /* MCP65 */
525 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci }, /* MCP65 */
526 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
527 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
528 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
529 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
530 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
531 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
532 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
533 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
534 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
535 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
536 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
537 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
538 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
539 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
540 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
541 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
542 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
543 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
544 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
545 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
546 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
547 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
548 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
549 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
550 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
551 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
552 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
553 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
554 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
555 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
556 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
557 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
558 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
559 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
560 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
561 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
562 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
563 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
564 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
565 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
566 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
567 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
568 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
569 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
570 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
571 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
572 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
573 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
574 { PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */
575 { PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */
576 { PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */
577 { PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */
578 { PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */
579 { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
580 { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
581 { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
582 { PCI_VDEVICE(NVIDIA, 0x0bd0), board_ahci }, /* MCP7B */
583 { PCI_VDEVICE(NVIDIA, 0x0bd1), board_ahci }, /* MCP7B */
584 { PCI_VDEVICE(NVIDIA, 0x0bd2), board_ahci }, /* MCP7B */
585 { PCI_VDEVICE(NVIDIA, 0x0bd3), board_ahci }, /* MCP7B */
586
587 /* SiS */
588 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
589 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
590 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
591
592 /* Marvell */
593 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
594 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
595
596 /* Generic, PCI class code for AHCI */
597 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
598 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
599
600 { } /* terminate list */
601 };
602
603
604 static struct pci_driver ahci_pci_driver = {
605 .name = DRV_NAME,
606 .id_table = ahci_pci_tbl,
607 .probe = ahci_init_one,
608 .remove = ata_pci_remove_one,
609 #ifdef CONFIG_PM
610 .suspend = ahci_pci_device_suspend,
611 .resume = ahci_pci_device_resume,
612 #endif
613 };
614
615
616 static inline int ahci_nr_ports(u32 cap)
617 {
618 return (cap & 0x1f) + 1;
619 }
620
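/*
 * Per the AHCI spec, per-port register banks start at ABAR offset 0x100
 * and each port occupies 0x80 bytes, hence the arithmetic in
 * __ahci_port_base() below.
 */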
621 static inline void __iomem *__ahci_port_base(struct ata_host *host,
622 unsigned int port_no)
623 {
624 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
625
626 return mmio + 0x100 + (port_no * 0x80);
627 }
628
629 static inline void __iomem *ahci_port_base(struct ata_port *ap)
630 {
631 return __ahci_port_base(ap->host, ap->port_no);
632 }
633
634 static void ahci_enable_ahci(void __iomem *mmio)
635 {
636 u32 tmp;
637
638 /* turn on AHCI_EN */
639 tmp = readl(mmio + HOST_CTL);
640 if (!(tmp & HOST_AHCI_EN)) {
641 tmp |= HOST_AHCI_EN;
642 writel(tmp, mmio + HOST_CTL);
643 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
644 WARN_ON(!(tmp & HOST_AHCI_EN));
645 }
646 }
647
648 /**
649 * ahci_save_initial_config - Save and fixup initial config values
650 * @pdev: target PCI device
651 * @hpriv: host private area to store config values
652 *
653 * Some registers containing configuration info might be set up by
654 * the BIOS and might be cleared on reset. This function saves the
655 * initial values of those registers into @hpriv such that they
656 * can be restored after controller reset.
657 *
658 * If inconsistent, config values are fixed up by this function.
659 *
660 * LOCKING:
661 * None.
662 */
663 static void ahci_save_initial_config(struct pci_dev *pdev,
664 struct ahci_host_priv *hpriv)
665 {
666 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
667 u32 cap, port_map;
668 int i;
669 int mv;
670
671 /* make sure AHCI mode is enabled before accessing CAP */
672 ahci_enable_ahci(mmio);
673
674 /* Values prefixed with saved_ are written back to host after
675 * reset. Values without are used for driver operation.
676 */
677 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
678 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
679
680 /* some chips have errata preventing 64bit use */
681 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
682 dev_printk(KERN_INFO, &pdev->dev,
683 "controller can't do 64bit DMA, forcing 32bit\n");
684 cap &= ~HOST_CAP_64;
685 }
686
687 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
688 dev_printk(KERN_INFO, &pdev->dev,
689 "controller can't do NCQ, turning off CAP_NCQ\n");
690 cap &= ~HOST_CAP_NCQ;
691 }
692
693 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
694 dev_printk(KERN_INFO, &pdev->dev,
695 "controller can't do PMP, turning off CAP_PMP\n");
696 cap &= ~HOST_CAP_PMP;
697 }
698
699 /*
700 * Temporary Marvell 6145/6121 hack: PATA port presence is
701 * asserted through the standard AHCI port presence register,
702 * so mask port_map down to the SATA ports (0x3 on 6121, 0xf on 6145)
703 */
704 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
705 if (pdev->device == 0x6121)
706 mv = 0x3;
707 else
708 mv = 0xf;
709 dev_printk(KERN_ERR, &pdev->dev,
710 "MV_AHCI HACK: port_map %x -> %x\n",
711 port_map,
712 port_map & mv);
713
714 port_map &= mv;
715 }
716
717 /* cross check port_map and cap.n_ports */
718 if (port_map) {
719 int map_ports = 0;
720
721 for (i = 0; i < AHCI_MAX_PORTS; i++)
722 if (port_map & (1 << i))
723 map_ports++;
724
725 /* If PI has more ports than n_ports, whine, clear
726 * port_map and let it be generated from n_ports.
727 */
728 if (map_ports > ahci_nr_ports(cap)) {
729 dev_printk(KERN_WARNING, &pdev->dev,
730 "implemented port map (0x%x) contains more "
731 "ports than nr_ports (%u), using nr_ports\n",
732 port_map, ahci_nr_ports(cap));
733 port_map = 0;
734 }
735 }
736
737 /* fabricate port_map from cap.nr_ports */
738 if (!port_map) {
739 port_map = (1 << ahci_nr_ports(cap)) - 1;
740 dev_printk(KERN_WARNING, &pdev->dev,
741 "forcing PORTS_IMPL to 0x%x\n", port_map);
742
743 /* write the fixed up value to the PI register */
744 hpriv->saved_port_map = port_map;
745 }
746
747 /* record values to use during operation */
748 hpriv->cap = cap;
749 hpriv->port_map = port_map;
750 }
751
752 /**
753 * ahci_restore_initial_config - Restore initial config
754 * @host: target ATA host
755 *
756 * Restore initial config stored by ahci_save_initial_config().
757 *
758 * LOCKING:
759 * None.
760 */
761 static void ahci_restore_initial_config(struct ata_host *host)
762 {
763 struct ahci_host_priv *hpriv = host->private_data;
764 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
765
766 writel(hpriv->saved_cap, mmio + HOST_CAP);
767 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
768 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
769 }
770
771 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
772 {
773 static const int offset[] = {
774 [SCR_STATUS] = PORT_SCR_STAT,
775 [SCR_CONTROL] = PORT_SCR_CTL,
776 [SCR_ERROR] = PORT_SCR_ERR,
777 [SCR_ACTIVE] = PORT_SCR_ACT,
778 [SCR_NOTIFICATION] = PORT_SCR_NTF,
779 };
780 struct ahci_host_priv *hpriv = ap->host->private_data;
781
782 if (sc_reg < ARRAY_SIZE(offset) &&
783 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
784 return offset[sc_reg];
785 return 0;
786 }
787
788 static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
789 {
790 void __iomem *port_mmio = ahci_port_base(ap);
791 int offset = ahci_scr_offset(ap, sc_reg);
792
793 if (offset) {
794 *val = readl(port_mmio + offset);
795 return 0;
796 }
797 return -EINVAL;
798 }
799
800 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
801 {
802 void __iomem *port_mmio = ahci_port_base(ap);
803 int offset = ahci_scr_offset(ap, sc_reg);
804
805 if (offset) {
806 writel(val, port_mmio + offset);
807 return 0;
808 }
809 return -EINVAL;
810 }
811
812 static void ahci_start_engine(struct ata_port *ap)
813 {
814 void __iomem *port_mmio = ahci_port_base(ap);
815 u32 tmp;
816
817 /* start DMA */
818 tmp = readl(port_mmio + PORT_CMD);
819 tmp |= PORT_CMD_START;
820 writel(tmp, port_mmio + PORT_CMD);
821 readl(port_mmio + PORT_CMD); /* flush */
822 }
823
824 static int ahci_stop_engine(struct ata_port *ap)
825 {
826 void __iomem *port_mmio = ahci_port_base(ap);
827 u32 tmp;
828
829 tmp = readl(port_mmio + PORT_CMD);
830
831 /* check if the HBA is idle */
832 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
833 return 0;
834
835 /* setting HBA to idle */
836 tmp &= ~PORT_CMD_START;
837 writel(tmp, port_mmio + PORT_CMD);
838
839 /* wait for engine to stop. This could be as long as 500 msec */
840 tmp = ata_wait_register(port_mmio + PORT_CMD,
841 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
842 if (tmp & PORT_CMD_LIST_ON)
843 return -EIO;
844
845 return 0;
846 }
847
848 static void ahci_start_fis_rx(struct ata_port *ap)
849 {
850 void __iomem *port_mmio = ahci_port_base(ap);
851 struct ahci_host_priv *hpriv = ap->host->private_data;
852 struct ahci_port_priv *pp = ap->private_data;
853 u32 tmp;
854
855 /* program the command list and FIS rx buffer addresses */
856 if (hpriv->cap & HOST_CAP_64)
857 writel((pp->cmd_slot_dma >> 16) >> 16,
858 port_mmio + PORT_LST_ADDR_HI);
859 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
860
861 if (hpriv->cap & HOST_CAP_64)
862 writel((pp->rx_fis_dma >> 16) >> 16,
863 port_mmio + PORT_FIS_ADDR_HI);
864 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
865
866 /* enable FIS reception */
867 tmp = readl(port_mmio + PORT_CMD);
868 tmp |= PORT_CMD_FIS_RX;
869 writel(tmp, port_mmio + PORT_CMD);
870
871 /* flush */
872 readl(port_mmio + PORT_CMD);
873 }
874
875 static int ahci_stop_fis_rx(struct ata_port *ap)
876 {
877 void __iomem *port_mmio = ahci_port_base(ap);
878 u32 tmp;
879
880 /* disable FIS reception */
881 tmp = readl(port_mmio + PORT_CMD);
882 tmp &= ~PORT_CMD_FIS_RX;
883 writel(tmp, port_mmio + PORT_CMD);
884
885 /* wait for completion, spec says 500ms, give it 1000 */
886 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
887 PORT_CMD_FIS_ON, 10, 1000);
888 if (tmp & PORT_CMD_FIS_ON)
889 return -EBUSY;
890
891 return 0;
892 }
893
894 static void ahci_power_up(struct ata_port *ap)
895 {
896 struct ahci_host_priv *hpriv = ap->host->private_data;
897 void __iomem *port_mmio = ahci_port_base(ap);
898 u32 cmd;
899
900 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
901
902 /* spin up device */
903 if (hpriv->cap & HOST_CAP_SSS) {
904 cmd |= PORT_CMD_SPIN_UP;
905 writel(cmd, port_mmio + PORT_CMD);
906 }
907
908 /* wake up link */
909 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
910 }
911
912 static void ahci_disable_alpm(struct ata_port *ap)
913 {
914 struct ahci_host_priv *hpriv = ap->host->private_data;
915 void __iomem *port_mmio = ahci_port_base(ap);
916 u32 cmd;
917 struct ahci_port_priv *pp = ap->private_data;
918
919 /* IPM bits should be disabled by libata-core */
920 /* get the existing command bits */
921 cmd = readl(port_mmio + PORT_CMD);
922
923 /* disable ALPM and ASP */
924 cmd &= ~PORT_CMD_ASP;
925 cmd &= ~PORT_CMD_ALPE;
926
927 /* force the interface back to active */
928 cmd |= PORT_CMD_ICC_ACTIVE;
929
930 /* write out new cmd value */
931 writel(cmd, port_mmio + PORT_CMD);
932 cmd = readl(port_mmio + PORT_CMD);
933
934 /* wait 10ms to be sure we've come out of any low power state */
935 msleep(10);
936
937 /* clear out any PhyRdy stuff from interrupt status */
938 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
939
940 /* go ahead and clean out PhyRdy Change from SError too */
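/* (bits 16 and 18 are SError DIAG.N "PhyRdy change" and DIAG.W "Comm Wake") */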
941 ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
942
943 /*
944 * Clear flag to indicate that we should ignore all PhyRdy
945 * state changes
946 */
947 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
948
949 /*
950 * Enable interrupts on Phy Ready.
951 */
952 pp->intr_mask |= PORT_IRQ_PHYRDY;
953 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
954
955 /*
956 * don't change the link pm policy - we can be called
957 * just to turn off link pm temporarily
958 */
959 }
960
961 static int ahci_enable_alpm(struct ata_port *ap,
962 enum link_pm policy)
963 {
964 struct ahci_host_priv *hpriv = ap->host->private_data;
965 void __iomem *port_mmio = ahci_port_base(ap);
966 u32 cmd;
967 struct ahci_port_priv *pp = ap->private_data;
968 u32 asp;
969
970 /* Make sure the host is capable of link power management */
971 if (!(hpriv->cap & HOST_CAP_ALPM))
972 return -EINVAL;
973
974 switch (policy) {
975 case MAX_PERFORMANCE:
976 case NOT_AVAILABLE:
977 /*
978 * if we came here with NOT_AVAILABLE,
979 * it just means this is the first time we
980 * have tried to enable - default to max performance,
981 * and let the user go to lower power modes on request.
982 */
983 ahci_disable_alpm(ap);
984 return 0;
985 case MIN_POWER:
986 /* configure HBA to enter SLUMBER */
987 asp = PORT_CMD_ASP;
988 break;
989 case MEDIUM_POWER:
990 /* configure HBA to enter PARTIAL */
991 asp = 0;
992 break;
993 default:
994 return -EINVAL;
995 }
996
997 /*
998 * Disable interrupts on Phy Ready. This keeps us from
999 * getting woken up due to spurious phy ready interrupts
1000 * TBD - Hot plug should be done via polling now, is
1001 * that even supported?
1002 */
1003 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1004 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1005
1006 /*
1007 * Set a flag to indicate that we should ignore all PhyRdy
1008 * state changes since these can happen now whenever we
1009 * change link state
1010 */
1011 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1012
1013 /* get the existing command bits */
1014 cmd = readl(port_mmio + PORT_CMD);
1015
1016 /*
1017 * Set ASP based on Policy
1018 */
1019 cmd |= asp;
1020
1021 /*
1022 * Setting this bit will instruct the HBA to aggressively
1023 * enter a lower power link state when it's appropriate and
1024 * based on the value set above for ASP
1025 */
1026 cmd |= PORT_CMD_ALPE;
1027
1028 /* write out new cmd value */
1029 writel(cmd, port_mmio + PORT_CMD);
1030 cmd = readl(port_mmio + PORT_CMD);
1031
1032 /* IPM bits should be set by libata-core */
1033 return 0;
1034 }
1035
1036 #ifdef CONFIG_PM
1037 static void ahci_power_down(struct ata_port *ap)
1038 {
1039 struct ahci_host_priv *hpriv = ap->host->private_data;
1040 void __iomem *port_mmio = ahci_port_base(ap);
1041 u32 cmd, scontrol;
1042
1043 if (!(hpriv->cap & HOST_CAP_SSS))
1044 return;
1045
1046 /* put device into listen mode, first set PxSCTL.DET to 0 */
1047 scontrol = readl(port_mmio + PORT_SCR_CTL);
1048 scontrol &= ~0xf;
1049 writel(scontrol, port_mmio + PORT_SCR_CTL);
1050
1051 /* then set PxCMD.SUD to 0 */
1052 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1053 cmd &= ~PORT_CMD_SPIN_UP;
1054 writel(cmd, port_mmio + PORT_CMD);
1055 }
1056 #endif
1057
1058 static void ahci_start_port(struct ata_port *ap)
1059 {
1060 /* enable FIS reception */
1061 ahci_start_fis_rx(ap);
1062
1063 /* enable DMA */
1064 ahci_start_engine(ap);
1065 }
1066
1067 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1068 {
1069 int rc;
1070
1071 /* disable DMA */
1072 rc = ahci_stop_engine(ap);
1073 if (rc) {
1074 *emsg = "failed to stop engine";
1075 return rc;
1076 }
1077
1078 /* disable FIS reception */
1079 rc = ahci_stop_fis_rx(ap);
1080 if (rc) {
1081 *emsg = "failed to stop FIS RX";
1082 return rc;
1083 }
1084
1085 return 0;
1086 }
1087
1088 static int ahci_reset_controller(struct ata_host *host)
1089 {
1090 struct pci_dev *pdev = to_pci_dev(host->dev);
1091 struct ahci_host_priv *hpriv = host->private_data;
1092 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1093 u32 tmp;
1094
1095 /* we must be in AHCI mode before using anything
1096 * AHCI-specific, such as HOST_RESET.
1097 */
1098 ahci_enable_ahci(mmio);
1099
1100 /* global controller reset */
1101 if (!ahci_skip_host_reset) {
1102 tmp = readl(mmio + HOST_CTL);
1103 if ((tmp & HOST_RESET) == 0) {
1104 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1105 readl(mmio + HOST_CTL); /* flush */
1106 }
1107
1108 /* reset must complete within 1 second, or
1109 * the hardware should be considered fried.
1110 */
1111 ssleep(1);
1112
1113 tmp = readl(mmio + HOST_CTL);
1114 if (tmp & HOST_RESET) {
1115 dev_printk(KERN_ERR, host->dev,
1116 "controller reset failed (0x%x)\n", tmp);
1117 return -EIO;
1118 }
1119
1120 /* turn on AHCI mode */
1121 ahci_enable_ahci(mmio);
1122
1123 /* Some registers might be cleared on reset. Restore
1124 * initial values.
1125 */
1126 ahci_restore_initial_config(host);
1127 } else
1128 dev_printk(KERN_INFO, host->dev,
1129 "skipping global host reset\n");
1130
1131 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1132 u16 tmp16;
1133
1134 /* configure PCS */
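/* (the PCS register at config offset 0x92 has per-port enable bits on Intel parts) */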
1135 pci_read_config_word(pdev, 0x92, &tmp16);
1136 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1137 tmp16 |= hpriv->port_map;
1138 pci_write_config_word(pdev, 0x92, tmp16);
1139 }
1140 }
1141
1142 return 0;
1143 }
1144
1145 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1146 int port_no, void __iomem *mmio,
1147 void __iomem *port_mmio)
1148 {
1149 const char *emsg = NULL;
1150 int rc;
1151 u32 tmp;
1152
1153 /* make sure port is not active */
1154 rc = ahci_deinit_port(ap, &emsg);
1155 if (rc)
1156 dev_printk(KERN_WARNING, &pdev->dev,
1157 "%s (%d)\n", emsg, rc);
1158
1159 /* clear SError */
1160 tmp = readl(port_mmio + PORT_SCR_ERR);
1161 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1162 writel(tmp, port_mmio + PORT_SCR_ERR);
1163
1164 /* clear port IRQ */
1165 tmp = readl(port_mmio + PORT_IRQ_STAT);
1166 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1167 if (tmp)
1168 writel(tmp, port_mmio + PORT_IRQ_STAT);
1169
1170 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1171 }
1172
1173 static void ahci_init_controller(struct ata_host *host)
1174 {
1175 struct ahci_host_priv *hpriv = host->private_data;
1176 struct pci_dev *pdev = to_pci_dev(host->dev);
1177 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1178 int i;
1179 void __iomem *port_mmio;
1180 u32 tmp;
1181 int mv;
1182
1183 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1184 if (pdev->device == 0x6121)
1185 mv = 2;
1186 else
1187 mv = 4;
1188 port_mmio = __ahci_port_base(host, mv);
1189
1190 writel(0, port_mmio + PORT_IRQ_MASK);
1191
1192 /* clear port IRQ */
1193 tmp = readl(port_mmio + PORT_IRQ_STAT);
1194 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1195 if (tmp)
1196 writel(tmp, port_mmio + PORT_IRQ_STAT);
1197 }
1198
1199 for (i = 0; i < host->n_ports; i++) {
1200 struct ata_port *ap = host->ports[i];
1201
1202 port_mmio = ahci_port_base(ap);
1203 if (ata_port_is_dummy(ap))
1204 continue;
1205
1206 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1207 }
1208
1209 tmp = readl(mmio + HOST_CTL);
1210 VPRINTK("HOST_CTL 0x%x\n", tmp);
1211 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1212 tmp = readl(mmio + HOST_CTL);
1213 VPRINTK("HOST_CTL 0x%x\n", tmp);
1214 }
1215
1216 static void ahci_dev_config(struct ata_device *dev)
1217 {
1218 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1219
1220 if (hpriv->flags & AHCI_HFLAG_SECT255)
1221 dev->max_sectors = 255;
1222 }
1223
1224 static unsigned int ahci_dev_classify(struct ata_port *ap)
1225 {
1226 void __iomem *port_mmio = ahci_port_base(ap);
1227 struct ata_taskfile tf;
1228 u32 tmp;
1229
1230 tmp = readl(port_mmio + PORT_SIG);
1231 tf.lbah = (tmp >> 24) & 0xff;
1232 tf.lbam = (tmp >> 16) & 0xff;
1233 tf.lbal = (tmp >> 8) & 0xff;
1234 tf.nsect = (tmp) & 0xff;
1235
1236 return ata_dev_classify(&tf);
1237 }
1238
1239 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1240 u32 opts)
1241 {
1242 dma_addr_t cmd_tbl_dma;
1243
1244 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1245
1246 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1247 pp->cmd_slot[tag].status = 0;
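/*
 * Split the upper DMA address half with two 16-bit shifts instead of a
 * single ">> 32" so the expression stays well defined when dma_addr_t
 * is only 32 bits wide.
 */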
1248 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1249 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1250 }
1251
1252 static int ahci_kick_engine(struct ata_port *ap, int force_restart)
1253 {
1254 void __iomem *port_mmio = ap->ioaddr.cmd_addr;
1255 struct ahci_host_priv *hpriv = ap->host->private_data;
1256 u32 tmp;
1257 int busy, rc;
1258
1259 /* do we need to kick the port? */
1260 busy = ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ);
1261 if (!busy && !force_restart)
1262 return 0;
1263
1264 /* stop engine */
1265 rc = ahci_stop_engine(ap);
1266 if (rc)
1267 goto out_restart;
1268
1269 /* need to do CLO? */
1270 if (!busy) {
1271 rc = 0;
1272 goto out_restart;
1273 }
1274
1275 if (!(hpriv->cap & HOST_CAP_CLO)) {
1276 rc = -EOPNOTSUPP;
1277 goto out_restart;
1278 }
1279
1280 /* perform CLO */
1281 tmp = readl(port_mmio + PORT_CMD);
1282 tmp |= PORT_CMD_CLO;
1283 writel(tmp, port_mmio + PORT_CMD);
1284
1285 rc = 0;
1286 tmp = ata_wait_register(port_mmio + PORT_CMD,
1287 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1288 if (tmp & PORT_CMD_CLO)
1289 rc = -EIO;
1290
1291 /* restart engine */
1292 out_restart:
1293 ahci_start_engine(ap);
1294 return rc;
1295 }
1296
1297 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1298 struct ata_taskfile *tf, int is_cmd, u16 flags,
1299 unsigned long timeout_msec)
1300 {
1301 const u32 cmd_fis_len = 5; /* five dwords */
1302 struct ahci_port_priv *pp = ap->private_data;
1303 void __iomem *port_mmio = ahci_port_base(ap);
1304 u8 *fis = pp->cmd_tbl;
1305 u32 tmp;
1306
1307 /* prep the command */
1308 ata_tf_to_fis(tf, pmp, is_cmd, fis);
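/*
 * Command header DW0 (AHCI 1.1): bits 4:0 carry the FIS length in
 * dwords and bits 15:12 the target PMP port, hence
 * "cmd_fis_len | flags | (pmp << 12)" below.
 */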
1309 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1310
1311 /* issue & wait */
1312 writel(1, port_mmio + PORT_CMD_ISSUE);
1313
1314 if (timeout_msec) {
1315 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1316 1, timeout_msec);
1317 if (tmp & 0x1) {
1318 ahci_kick_engine(ap, 1);
1319 return -EBUSY;
1320 }
1321 } else
1322 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1323
1324 return 0;
1325 }
1326
1327 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1328 int pmp, unsigned long deadline)
1329 {
1330 struct ata_port *ap = link->ap;
1331 const char *reason = NULL;
1332 unsigned long now, msecs;
1333 struct ata_taskfile tf;
1334 int rc;
1335
1336 DPRINTK("ENTER\n");
1337
1338 if (ata_link_offline(link)) {
1339 DPRINTK("PHY reports no device\n");
1340 *class = ATA_DEV_NONE;
1341 return 0;
1342 }
1343
1344 /* prepare for SRST (AHCI-1.1 10.4.1) */
1345 rc = ahci_kick_engine(ap, 1);
1346 if (rc && rc != -EOPNOTSUPP)
1347 ata_link_printk(link, KERN_WARNING,
1348 "failed to reset engine (errno=%d)\n", rc);
1349
1350 ata_tf_init(link->device, &tf);
1351
1352 /* issue the first D2H Register FIS */
1353 msecs = 0;
1354 now = jiffies;
1355 if (time_after(deadline, now))
1356 msecs = jiffies_to_msecs(deadline - now);
1357
1358 tf.ctl |= ATA_SRST;
1359 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1360 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1361 rc = -EIO;
1362 reason = "1st FIS failed";
1363 goto fail;
1364 }
1365
1366 /* spec says at least 5us, but be generous and sleep for 1ms */
1367 msleep(1);
1368
1369 /* issue the second D2H Register FIS */
1370 tf.ctl &= ~ATA_SRST;
1371 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1372
1373 /* wait a while before checking status */
1374 ata_wait_after_reset(ap, deadline);
1375
1376 rc = ata_wait_ready(ap, deadline);
1377 /* link occupied, -ENODEV too is an error */
1378 if (rc) {
1379 reason = "device not ready";
1380 goto fail;
1381 }
1382 *class = ahci_dev_classify(ap);
1383
1384 DPRINTK("EXIT, class=%u\n", *class);
1385 return 0;
1386
1387 fail:
1388 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1389 return rc;
1390 }
1391
1392 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1393 unsigned long deadline)
1394 {
1395 int pmp = 0;
1396
1397 if (link->ap->flags & ATA_FLAG_PMP)
1398 pmp = SATA_PMP_CTRL_PORT;
1399
1400 return ahci_do_softreset(link, class, pmp, deadline);
1401 }
1402
1403 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1404 unsigned long deadline)
1405 {
1406 struct ata_port *ap = link->ap;
1407 struct ahci_port_priv *pp = ap->private_data;
1408 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1409 struct ata_taskfile tf;
1410 int rc;
1411
1412 DPRINTK("ENTER\n");
1413
1414 ahci_stop_engine(ap);
1415
1416 /* clear D2H reception area to properly wait for D2H FIS */
1417 ata_tf_init(link->device, &tf);
1418 tf.command = 0x80;
1419 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1420
1421 rc = sata_std_hardreset(link, class, deadline);
1422
1423 ahci_start_engine(ap);
1424
1425 if (rc == 0 && ata_link_online(link))
1426 *class = ahci_dev_classify(ap);
1427 if (rc != -EAGAIN && *class == ATA_DEV_UNKNOWN)
1428 *class = ATA_DEV_NONE;
1429
1430 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1431 return rc;
1432 }
1433
1434 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1435 unsigned long deadline)
1436 {
1437 struct ata_port *ap = link->ap;
1438 u32 serror;
1439 int rc;
1440
1441 DPRINTK("ENTER\n");
1442
1443 ahci_stop_engine(ap);
1444
1445 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1446 deadline);
1447
1448 /* vt8251 needs SError cleared for the port to operate */
1449 ahci_scr_read(ap, SCR_ERROR, &serror);
1450 ahci_scr_write(ap, SCR_ERROR, serror);
1451
1452 ahci_start_engine(ap);
1453
1454 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1455
1456 /* vt8251 doesn't clear BSY on signature FIS reception,
1457 * request follow-up softreset.
1458 */
1459 return rc ?: -EAGAIN;
1460 }
1461
1462 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1463 unsigned long deadline)
1464 {
1465 struct ata_port *ap = link->ap;
1466 struct ahci_port_priv *pp = ap->private_data;
1467 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1468 struct ata_taskfile tf;
1469 int rc;
1470
1471 ahci_stop_engine(ap);
1472
1473 /* clear D2H reception area to properly wait for D2H FIS */
1474 ata_tf_init(link->device, &tf);
1475 tf.command = 0x80;
1476 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1477
1478 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1479 deadline);
1480
1481 ahci_start_engine(ap);
1482
1483 if (rc || ata_link_offline(link))
1484 return rc;
1485
1486 /* spec mandates ">= 2ms" before checking status; wait 150ms to be safe */
1487 msleep(150);
1488
1489 /* The pseudo configuration device on SIMG4726 attached to
1490 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1491 * hardreset if no device is attached to the first downstream
1492 * port && the pseudo device locks up on SRST w/ PMP==0. To
1493 * work around this, wait for !BSY only briefly. If BSY isn't
1494 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1495 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1496 *
1497 * Wait for two seconds. Devices attached to downstream port
1498 * which can't process the following IDENTIFY after this will
1499 * have to be reset again. For most cases, this should
1500 * suffice while making probing snappy enough.
1501 */
1502 rc = ata_wait_ready(ap, jiffies + 2 * HZ);
1503 if (rc)
1504 ahci_kick_engine(ap, 0);
1505
1506 return 0;
1507 }
1508
1509 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1510 {
1511 struct ata_port *ap = link->ap;
1512 void __iomem *port_mmio = ahci_port_base(ap);
1513 u32 new_tmp, tmp;
1514
1515 ata_std_postreset(link, class);
1516
1517 /* Make sure port's ATAPI bit is set appropriately */
1518 new_tmp = tmp = readl(port_mmio + PORT_CMD);
1519 if (*class == ATA_DEV_ATAPI)
1520 new_tmp |= PORT_CMD_ATAPI;
1521 else
1522 new_tmp &= ~PORT_CMD_ATAPI;
1523 if (new_tmp != tmp) {
1524 writel(new_tmp, port_mmio + PORT_CMD);
1525 readl(port_mmio + PORT_CMD); /* flush */
1526 }
1527 }
1528
1529 static int ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
1530 unsigned long deadline)
1531 {
1532 return ahci_do_softreset(link, class, link->pmp, deadline);
1533 }
1534
1535 static u8 ahci_check_status(struct ata_port *ap)
1536 {
1537 void __iomem *mmio = ap->ioaddr.cmd_addr;
1538
1539 return readl(mmio + PORT_TFDATA) & 0xFF;
1540 }
1541
1542 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
1543 {
1544 struct ahci_port_priv *pp = ap->private_data;
1545 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1546
1547 ata_tf_from_fis(d2h_fis, tf);
1548 }
1549
1550 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1551 {
1552 struct scatterlist *sg;
1553 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1554 unsigned int si;
1555
1556 VPRINTK("ENTER\n");
1557
1558 /*
1559 * Next, the S/G list.
1560 */
1561 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1562 dma_addr_t addr = sg_dma_address(sg);
1563 u32 sg_len = sg_dma_len(sg);
1564
1565 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1566 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
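/* the PRD byte-count field is zero-based, hence "sg_len - 1" */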
1567 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1568 }
1569
1570 return si;
1571 }
1572
1573 static void ahci_qc_prep(struct ata_queued_cmd *qc)
1574 {
1575 struct ata_port *ap = qc->ap;
1576 struct ahci_port_priv *pp = ap->private_data;
1577 int is_atapi = ata_is_atapi(qc->tf.protocol);
1578 void *cmd_tbl;
1579 u32 opts;
1580 const u32 cmd_fis_len = 5; /* five dwords */
1581 unsigned int n_elem;
1582
1583 /*
1584 * Fill in command table information. First, the header,
1585 * a SATA Register - Host to Device command FIS.
1586 */
1587 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
1588
1589 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
1590 if (is_atapi) {
1591 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
1592 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
1593 }
1594
1595 n_elem = 0;
1596 if (qc->flags & ATA_QCFLAG_DMAMAP)
1597 n_elem = ahci_fill_sg(qc, cmd_tbl);
1598
1599 /*
1600 * Fill in command slot information.
1601 */
1602 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
1603 if (qc->tf.flags & ATA_TFLAG_WRITE)
1604 opts |= AHCI_CMD_WRITE;
1605 if (is_atapi)
1606 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
1607
1608 ahci_fill_cmd_slot(pp, qc->tag, opts);
1609 }
1610
1611 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1612 {
1613 struct ahci_host_priv *hpriv = ap->host->private_data;
1614 struct ahci_port_priv *pp = ap->private_data;
1615 struct ata_eh_info *host_ehi = &ap->link.eh_info;
1616 struct ata_link *link = NULL;
1617 struct ata_queued_cmd *active_qc;
1618 struct ata_eh_info *active_ehi;
1619 u32 serror;
1620
1621 /* determine active link */
1622 ata_port_for_each_link(link, ap)
1623 if (ata_link_active(link))
1624 break;
1625 if (!link)
1626 link = &ap->link;
1627
1628 active_qc = ata_qc_from_tag(ap, link->active_tag);
1629 active_ehi = &link->eh_info;
1630
1631 /* record irq stat */
1632 ata_ehi_clear_desc(host_ehi);
1633 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
1634
1635 /* AHCI needs SError cleared; otherwise, it might lock up */
1636 ahci_scr_read(ap, SCR_ERROR, &serror);
1637 ahci_scr_write(ap, SCR_ERROR, serror);
1638 host_ehi->serror |= serror;
1639
1640 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
1641 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
1642 irq_stat &= ~PORT_IRQ_IF_ERR;
1643
1644 if (irq_stat & PORT_IRQ_TF_ERR) {
1645 /* If qc is active, charge it; otherwise, the active
1646 * link. There's no active qc on NCQ errors. It will
1647 * be determined by EH by reading log page 10h.
1648 */
1649 if (active_qc)
1650 active_qc->err_mask |= AC_ERR_DEV;
1651 else
1652 active_ehi->err_mask |= AC_ERR_DEV;
1653
1654 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
1655 host_ehi->serror &= ~SERR_INTERNAL;
1656 }
1657
1658 if (irq_stat & PORT_IRQ_UNK_FIS) {
1659 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1660
1661 active_ehi->err_mask |= AC_ERR_HSM;
1662 active_ehi->action |= ATA_EH_SOFTRESET;
1663 ata_ehi_push_desc(active_ehi,
1664 "unknown FIS %08x %08x %08x %08x" ,
1665 unk[0], unk[1], unk[2], unk[3]);
1666 }
1667
1668 if (ap->nr_pmp_links && (irq_stat & PORT_IRQ_BAD_PMP)) {
1669 active_ehi->err_mask |= AC_ERR_HSM;
1670 active_ehi->action |= ATA_EH_SOFTRESET;
1671 ata_ehi_push_desc(active_ehi, "incorrect PMP");
1672 }
1673
1674 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
1675 host_ehi->err_mask |= AC_ERR_HOST_BUS;
1676 host_ehi->action |= ATA_EH_SOFTRESET;
1677 ata_ehi_push_desc(host_ehi, "host bus error");
1678 }
1679
1680 if (irq_stat & PORT_IRQ_IF_ERR) {
1681 host_ehi->err_mask |= AC_ERR_ATA_BUS;
1682 host_ehi->action |= ATA_EH_SOFTRESET;
1683 ata_ehi_push_desc(host_ehi, "interface fatal error");
1684 }
1685
1686 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
1687 ata_ehi_hotplugged(host_ehi);
1688 ata_ehi_push_desc(host_ehi, "%s",
1689 irq_stat & PORT_IRQ_CONNECT ?
1690 "connection status changed" : "PHY RDY changed");
1691 }
1692
1693 /* okay, let's hand over to EH */
1694
1695 if (irq_stat & PORT_IRQ_FREEZE)
1696 ata_port_freeze(ap);
1697 else
1698 ata_port_abort(ap);
1699 }
1700
1701 static void ahci_port_intr(struct ata_port *ap)
1702 {
1703 void __iomem *port_mmio = ap->ioaddr.cmd_addr;
1704 struct ata_eh_info *ehi = &ap->link.eh_info;
1705 struct ahci_port_priv *pp = ap->private_data;
1706 struct ahci_host_priv *hpriv = ap->host->private_data;
1707 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
1708 u32 status, qc_active;
1709 int rc;
1710
1711 status = readl(port_mmio + PORT_IRQ_STAT);
1712 writel(status, port_mmio + PORT_IRQ_STAT);
1713
1714 /* ignore BAD_PMP while resetting */
1715 if (unlikely(resetting))
1716 status &= ~PORT_IRQ_BAD_PMP;
1717
1718 /* If we are only getting a PhyRdy event, it is just
1719 * a power state change; clear the PhyRdy bit from the
1720 * interrupt status, plus the PhyRdy/Comm Wake bits
1721 * from SError.
1722 */
1723 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
1724 (status & PORT_IRQ_PHYRDY)) {
1725 status &= ~PORT_IRQ_PHYRDY;
1726 ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
1727 }
1728
1729 if (unlikely(status & PORT_IRQ_ERROR)) {
1730 ahci_error_intr(ap, status);
1731 return;
1732 }
1733
1734 if (status & PORT_IRQ_SDB_FIS) {
1735 /* If SNotification is available, leave notification
1736 * handling to sata_async_notification(). If not,
1737 * emulate it by snooping SDB FIS RX area.
1738 *
1739 * Snooping FIS RX area is probably cheaper than
1740 * poking SNotification, but some controllers which
1741 * implement SNotification, ICH9 for example, don't
1742 * store the AN SDB FIS into the receive area.
1743 */
1744 if (hpriv->cap & HOST_CAP_SNTF)
1745 sata_async_notification(ap);
1746 else {
1747 /* If the 'N' bit in word 0 of the FIS is set,
1748 * we just received asynchronous notification.
1749 * Tell libata about it.
1750 */
1751 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
1752 u32 f0 = le32_to_cpu(f[0]);
1753
1754 if (f0 & (1 << 15))
1755 sata_async_notification(ap);
1756 }
1757 }
1758
1759 /* pp->active_link is valid iff any command is in flight */
1760 if (ap->qc_active && pp->active_link->sactive)
1761 qc_active = readl(port_mmio + PORT_SCR_ACT);
1762 else
1763 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
1764
1765 rc = ata_qc_complete_multiple(ap, qc_active, NULL);
1766
1767 /* while resetting, invalid completions are expected */
1768 if (unlikely(rc < 0 && !resetting)) {
1769 ehi->err_mask |= AC_ERR_HSM;
1770 ehi->action |= ATA_EH_SOFTRESET;
1771 ata_port_freeze(ap);
1772 }
1773 }
1774
1775 static void ahci_irq_clear(struct ata_port *ap)
1776 {
1777 /* TODO */
1778 }
1779
1780 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
1781 {
1782 struct ata_host *host = dev_instance;
1783 struct ahci_host_priv *hpriv;
1784 unsigned int i, handled = 0;
1785 void __iomem *mmio;
1786 u32 irq_stat, irq_ack = 0;
1787
1788 VPRINTK("ENTER\n");
1789
1790 hpriv = host->private_data;
1791 mmio = host->iomap[AHCI_PCI_BAR];
1792
1793 /* sigh. 0xffffffff is a valid return from h/w */
1794 irq_stat = readl(mmio + HOST_IRQ_STAT);
1795 irq_stat &= hpriv->port_map;
1796 if (!irq_stat)
1797 return IRQ_NONE;
1798
1799 spin_lock(&host->lock);
1800
1801 for (i = 0; i < host->n_ports; i++) {
1802 struct ata_port *ap;
1803
1804 if (!(irq_stat & (1 << i)))
1805 continue;
1806
1807 ap = host->ports[i];
1808 if (ap) {
1809 ahci_port_intr(ap);
1810 VPRINTK("port %u\n", i);
1811 } else {
1812 VPRINTK("port %u (no irq)\n", i);
1813 if (ata_ratelimit())
1814 dev_printk(KERN_WARNING, host->dev,
1815 "interrupt on disabled port %u\n", i);
1816 }
1817
1818 irq_ack |= (1 << i);
1819 }
1820
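/* Ack the handled ports in HOST_IRQ_STAT (write-1-to-clear) only
 * after each port handler has cleared its own PORT_IRQ_STAT bits.
 */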
1821 if (irq_ack) {
1822 writel(irq_ack, mmio + HOST_IRQ_STAT);
1823 handled = 1;
1824 }
1825
1826 spin_unlock(&host->lock);
1827
1828 VPRINTK("EXIT\n");
1829
1830 return IRQ_RETVAL(handled);
1831 }
1832
1833 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1834 {
1835 struct ata_port *ap = qc->ap;
1836 void __iomem *port_mmio = ahci_port_base(ap);
1837 struct ahci_port_priv *pp = ap->private_data;
1838
1839 /* Keep track of the currently active link. It will be used
1840 * in completion path to determine whether NCQ phase is in
1841 * progress.
1842 */
1843 pp->active_link = qc->dev->link;
1844
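/* For NCQ commands the tag must be set in PxSACT before the
 * same tag is written to PxCI to issue the command.
 */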
1845 if (qc->tf.protocol == ATA_PROT_NCQ)
1846 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1847 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1848 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1849
1850 return 0;
1851 }
1852
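/* ->freeze: mask all port interrupts; any status that piles up
 * while frozen is cleared again by ahci_thaw().
 */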
1853 static void ahci_freeze(struct ata_port *ap)
1854 {
1855 void __iomem *port_mmio = ahci_port_base(ap);
1856
1857 /* turn IRQ off */
1858 writel(0, port_mmio + PORT_IRQ_MASK);
1859 }
1860
1861 static void ahci_thaw(struct ata_port *ap)
1862 {
1863 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1864 void __iomem *port_mmio = ahci_port_base(ap);
1865 u32 tmp;
1866 struct ahci_port_priv *pp = ap->private_data;
1867
1868 /* clear IRQ */
1869 tmp = readl(port_mmio + PORT_IRQ_STAT);
1870 writel(tmp, port_mmio + PORT_IRQ_STAT);
1871 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
1872
1873 /* turn IRQ back on */
1874 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1875 }
1876
1877 static void ahci_error_handler(struct ata_port *ap)
1878 {
1879 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1880 /* restart engine */
1881 ahci_stop_engine(ap);
1882 ahci_start_engine(ap);
1883 }
1884
1885 /* perform recovery */
1886 sata_pmp_do_eh(ap, ata_std_prereset, ahci_softreset,
1887 ahci_hardreset, ahci_postreset,
1888 sata_pmp_std_prereset, ahci_pmp_softreset,
1889 sata_pmp_std_hardreset, sata_pmp_std_postreset);
1890 }
1891
1892 static void ahci_vt8251_error_handler(struct ata_port *ap)
1893 {
1894 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1895 /* restart engine */
1896 ahci_stop_engine(ap);
1897 ahci_start_engine(ap);
1898 }
1899
1900 /* perform recovery */
1901 ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_vt8251_hardreset,
1902 ahci_postreset);
1903 }
1904
1905 static void ahci_p5wdh_error_handler(struct ata_port *ap)
1906 {
1907 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1908 /* restart engine */
1909 ahci_stop_engine(ap);
1910 ahci_start_engine(ap);
1911 }
1912
1913 /* perform recovery */
1914 ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_p5wdh_hardreset,
1915 ahci_postreset);
1916 }
1917
1918 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1919 {
1920 struct ata_port *ap = qc->ap;
1921
1922 /* make DMA engine forget about the failed command */
1923 if (qc->flags & ATA_QCFLAG_FAILED)
1924 ahci_kick_engine(ap, 1);
1925 }
1926
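/* Tell the HBA that a port multiplier is attached (PORT_CMD_PMP)
 * and start reporting bad-PMP errors via the port interrupt mask.
 */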
1927 static void ahci_pmp_attach(struct ata_port *ap)
1928 {
1929 void __iomem *port_mmio = ahci_port_base(ap);
1930 struct ahci_port_priv *pp = ap->private_data;
1931 u32 cmd;
1932
1933 cmd = readl(port_mmio + PORT_CMD);
1934 cmd |= PORT_CMD_PMP;
1935 writel(cmd, port_mmio + PORT_CMD);
1936
1937 pp->intr_mask |= PORT_IRQ_BAD_PMP;
1938 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1939 }
1940
1941 static void ahci_pmp_detach(struct ata_port *ap)
1942 {
1943 void __iomem *port_mmio = ahci_port_base(ap);
1944 struct ahci_port_priv *pp = ap->private_data;
1945 u32 cmd;
1946
1947 cmd = readl(port_mmio + PORT_CMD);
1948 cmd &= ~PORT_CMD_PMP;
1949 writel(cmd, port_mmio + PORT_CMD);
1950
1951 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
1952 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1953 }
1954
1955 static int ahci_port_resume(struct ata_port *ap)
1956 {
1957 ahci_power_up(ap);
1958 ahci_start_port(ap);
1959
1960 if (ap->nr_pmp_links)
1961 ahci_pmp_attach(ap);
1962 else
1963 ahci_pmp_detach(ap);
1964
1965 return 0;
1966 }
1967
1968 #ifdef CONFIG_PM
1969 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
1970 {
1971 const char *emsg = NULL;
1972 int rc;
1973
1974 rc = ahci_deinit_port(ap, &emsg);
1975 if (rc == 0)
1976 ahci_power_down(ap);
1977 else {
1978 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
1979 ahci_start_port(ap);
1980 }
1981
1982 return rc;
1983 }
1984
1985 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
1986 {
1987 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1988 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1989 u32 ctl;
1990
1991 if (mesg.event & PM_EVENT_SLEEP) {
1992 /* AHCI spec rev1.1 section 8.3.3:
1993 * Software must disable interrupts prior to requesting a
1994 * transition of the HBA to D3 state.
1995 */
1996 ctl = readl(mmio + HOST_CTL);
1997 ctl &= ~HOST_IRQ_EN;
1998 writel(ctl, mmio + HOST_CTL);
1999 readl(mmio + HOST_CTL); /* flush */
2000 }
2001
2002 return ata_pci_device_suspend(pdev, mesg);
2003 }
2004
2005 static int ahci_pci_device_resume(struct pci_dev *pdev)
2006 {
2007 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2008 int rc;
2009
2010 rc = ata_pci_device_do_resume(pdev);
2011 if (rc)
2012 return rc;
2013
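/* If we are coming back from full suspend the controller state
 * may have been lost, so reset and reinitialize it before
 * resuming the ports.
 */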
2014 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2015 rc = ahci_reset_controller(host);
2016 if (rc)
2017 return rc;
2018
2019 ahci_init_controller(host);
2020 }
2021
2022 ata_host_resume(host);
2023
2024 return 0;
2025 }
2026 #endif
2027
2028 static int ahci_port_start(struct ata_port *ap)
2029 {
2030 struct device *dev = ap->host->dev;
2031 struct ahci_port_priv *pp;
2032 void *mem;
2033 dma_addr_t mem_dma;
2034
2035 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2036 if (!pp)
2037 return -ENOMEM;
2038
2039 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2040 GFP_KERNEL);
2041 if (!mem)
2042 return -ENOMEM;
2043 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2044
2045 /*
2046 * First item in chunk of DMA memory: 32-slot command table,
2047 * 32 bytes each in size
2048 */
2049 pp->cmd_slot = mem;
2050 pp->cmd_slot_dma = mem_dma;
2051
2052 mem += AHCI_CMD_SLOT_SZ;
2053 mem_dma += AHCI_CMD_SLOT_SZ;
2054
2055 /*
2056 * Second item: Received-FIS area
2057 */
2058 pp->rx_fis = mem;
2059 pp->rx_fis_dma = mem_dma;
2060
2061 mem += AHCI_RX_FIS_SZ;
2062 mem_dma += AHCI_RX_FIS_SZ;
2063
2064 /*
2065 * Third item: data area for storing a single command
2066 * and its scatter-gather table
2067 */
2068 pp->cmd_tbl = mem;
2069 pp->cmd_tbl_dma = mem_dma;
2070
2071 /*
2072 * Save off the initial list of interrupts to be enabled.
2073 * This may be changed later.
2074 */
2075 pp->intr_mask = DEF_PORT_IRQ;
2076
2077 ap->private_data = pp;
2078
2079 /* engage engines, captain */
2080 return ahci_port_resume(ap);
2081 }
2082
2083 static void ahci_port_stop(struct ata_port *ap)
2084 {
2085 const char *emsg = NULL;
2086 int rc;
2087
2088 /* de-initialize port */
2089 rc = ahci_deinit_port(ap, &emsg);
2090 if (rc)
2091 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2092 }
2093
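/* Set up the streaming and consistent DMA masks: prefer 64-bit
 * addressing when the HBA supports it (using_dac), falling back
 * to a 32-bit consistent mask, and use 32-bit masks otherwise.
 */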
2094 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2095 {
2096 int rc;
2097
2098 if (using_dac &&
2099 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2100 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2101 if (rc) {
2102 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2103 if (rc) {
2104 dev_printk(KERN_ERR, &pdev->dev,
2105 "64-bit DMA enable failed\n");
2106 return rc;
2107 }
2108 }
2109 } else {
2110 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2111 if (rc) {
2112 dev_printk(KERN_ERR, &pdev->dev,
2113 "32-bit DMA enable failed\n");
2114 return rc;
2115 }
2116 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2117 if (rc) {
2118 dev_printk(KERN_ERR, &pdev->dev,
2119 "32-bit consistent DMA enable failed\n");
2120 return rc;
2121 }
2122 }
2123 return 0;
2124 }
2125
2126 static void ahci_print_info(struct ata_host *host)
2127 {
2128 struct ahci_host_priv *hpriv = host->private_data;
2129 struct pci_dev *pdev = to_pci_dev(host->dev);
2130 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2131 u32 vers, cap, impl, speed;
2132 const char *speed_s;
2133 u16 cc;
2134 const char *scc_s;
2135
2136 vers = readl(mmio + HOST_VERSION);
2137 cap = hpriv->cap;
2138 impl = hpriv->port_map;
2139
2140 speed = (cap >> 20) & 0xf;
2141 if (speed == 1)
2142 speed_s = "1.5";
2143 else if (speed == 2)
2144 speed_s = "3";
2145 else
2146 speed_s = "?";
2147
2148 pci_read_config_word(pdev, 0x0a, &cc);
2149 if (cc == PCI_CLASS_STORAGE_IDE)
2150 scc_s = "IDE";
2151 else if (cc == PCI_CLASS_STORAGE_SATA)
2152 scc_s = "SATA";
2153 else if (cc == PCI_CLASS_STORAGE_RAID)
2154 scc_s = "RAID";
2155 else
2156 scc_s = "unknown";
2157
2158 dev_printk(KERN_INFO, &pdev->dev,
2159 "AHCI %02x%02x.%02x%02x "
2160 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2161 ,
2162
2163 (vers >> 24) & 0xff,
2164 (vers >> 16) & 0xff,
2165 (vers >> 8) & 0xff,
2166 vers & 0xff,
2167
2168 ((cap >> 8) & 0x1f) + 1,
2169 (cap & 0x1f) + 1,
2170 speed_s,
2171 impl,
2172 scc_s);
2173
2174 dev_printk(KERN_INFO, &pdev->dev,
2175 "flags: "
2176 "%s%s%s%s%s%s%s"
2177 "%s%s%s%s%s%s%s\n"
2178 ,
2179
2180 cap & (1 << 31) ? "64bit " : "",
2181 cap & (1 << 30) ? "ncq " : "",
2182 cap & (1 << 29) ? "sntf " : "",
2183 cap & (1 << 28) ? "ilck " : "",
2184 cap & (1 << 27) ? "stag " : "",
2185 cap & (1 << 26) ? "pm " : "",
2186 cap & (1 << 25) ? "led " : "",
2187
2188 cap & (1 << 24) ? "clo " : "",
2189 cap & (1 << 19) ? "nz " : "",
2190 cap & (1 << 18) ? "only " : "",
2191 cap & (1 << 17) ? "pmp " : "",
2192 cap & (1 << 15) ? "pio " : "",
2193 cap & (1 << 14) ? "slum " : "",
2194 cap & (1 << 13) ? "part " : ""
2195 );
2196 }
2197
2198 /* On the ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2
2199 * is hardwired to the on-board SIMG 4726. The chipset is ICH8 and
2200 * doesn't support PMP, and the 4726 either directly exports the
2201 * device attached to its first downstream port or acts as a hardware
2202 * storage controller and emulates a single ATA device (which can be
2203 * RAID 0/1 or some other configuration).
2204 *
2205 * When there's no device attached to the first downstream port of
2206 * the 4726, a "Config Disk" appears, which is a pseudo ATA device
2207 * used to configure the 4726. Its ATA emulation is very lame: it
2208 * doesn't send a signature D2H Register FIS after the initial
2209 * hardreset, chokes on SRST w/ PMP==0 and has a bunch of other issues.
2210 *
2211 * The following function works around the problem by always using
2212 * hardreset on the port and not depending on receiving the signature
2213 * FIS afterward. If the signature FIS isn't received soon, ATA class
2214 * is assumed without a follow-up softreset.
2215 */
2216 static void ahci_p5wdh_workaround(struct ata_host *host)
2217 {
2218 static struct dmi_system_id sysids[] = {
2219 {
2220 .ident = "P5W DH Deluxe",
2221 .matches = {
2222 DMI_MATCH(DMI_SYS_VENDOR,
2223 "ASUSTEK COMPUTER INC"),
2224 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2225 },
2226 },
2227 { }
2228 };
2229 struct pci_dev *pdev = to_pci_dev(host->dev);
2230
2231 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2232 dmi_check_system(sysids)) {
2233 struct ata_port *ap = host->ports[1];
2234
2235 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2236 "Deluxe on-board SIMG4726 workaround\n");
2237
2238 ap->ops = &ahci_p5wdh_ops;
2239 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2240 }
2241 }
2242
2243 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2244 {
2245 static int printed_version;
2246 struct ata_port_info pi = ahci_port_info[ent->driver_data];
2247 const struct ata_port_info *ppi[] = { &pi, NULL };
2248 struct device *dev = &pdev->dev;
2249 struct ahci_host_priv *hpriv;
2250 struct ata_host *host;
2251 int n_ports, i, rc;
2252
2253 VPRINTK("ENTER\n");
2254
2255 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2256
2257 if (!printed_version++)
2258 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2259
2260 /* acquire resources */
2261 rc = pcim_enable_device(pdev);
2262 if (rc)
2263 return rc;
2264
2265 /* AHCI controllers often implement an SFF compatible interface.
2266 * Grab all PCI BARs just in case.
2267 */
2268 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
2269 if (rc == -EBUSY)
2270 pcim_pin_device(pdev);
2271 if (rc)
2272 return rc;
2273
2274 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2275 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
2276 u8 map;
2277
2278 /* ICH6s share the same PCI ID for both piix and ahci
2279 * modes. Enabling ahci mode while MAP indicates
2280 * combined mode is a bad idea. Yield to ata_piix.
2281 */
2282 pci_read_config_byte(pdev, ICH_MAP, &map);
2283 if (map & 0x3) {
2284 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
2285 "combined mode, can't enable AHCI mode\n");
2286 return -ENODEV;
2287 }
2288 }
2289
2290 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2291 if (!hpriv)
2292 return -ENOMEM;
2293 hpriv->flags |= (unsigned long)pi.private_data;
2294
2295 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
2296 pci_intx(pdev, 1);
2297
2298 /* save initial config */
2299 ahci_save_initial_config(pdev, hpriv);
2300
2301 /* prepare host */
2302 if (hpriv->cap & HOST_CAP_NCQ)
2303 pi.flags |= ATA_FLAG_NCQ;
2304
2305 if (hpriv->cap & HOST_CAP_PMP)
2306 pi.flags |= ATA_FLAG_PMP;
2307
2308 /* CAP.NP sometimes indicates the index of the last enabled
2309 * port and at other times that of the last possible port, so
2310 * determining the maximum port number requires looking at
2311 * both CAP.NP and port_map.
2312 */
2313 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
2314
2315 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2316 if (!host)
2317 return -ENOMEM;
2318 host->iomap = pcim_iomap_table(pdev);
2319 host->private_data = hpriv;
2320
2321 for (i = 0; i < host->n_ports; i++) {
2322 struct ata_port *ap = host->ports[i];
2323 void __iomem *port_mmio = ahci_port_base(ap);
2324
2325 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
2326 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
2327 0x100 + ap->port_no * 0x80, "port");
2328
2329 /* set initial link pm policy */
2330 ap->pm_policy = NOT_AVAILABLE;
2331
2332 /* standard SATA port setup */
2333 if (hpriv->port_map & (1 << i))
2334 ap->ioaddr.cmd_addr = port_mmio;
2335
2336 /* disabled/not-implemented port */
2337 else
2338 ap->ops = &ata_dummy_port_ops;
2339 }
2340
2341 /* apply workaround for ASUS P5W DH Deluxe mainboard */
2342 ahci_p5wdh_workaround(host);
2343
2344 /* initialize adapter */
2345 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
2346 if (rc)
2347 return rc;
2348
2349 rc = ahci_reset_controller(host);
2350 if (rc)
2351 return rc;
2352
2353 ahci_init_controller(host);
2354 ahci_print_info(host);
2355
2356 pci_set_master(pdev);
2357 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
2358 &ahci_sht);
2359 }
2360
2361 static int __init ahci_init(void)
2362 {
2363 return pci_register_driver(&ahci_pci_driver);
2364 }
2365
2366 static void __exit ahci_exit(void)
2367 {
2368 pci_unregister_driver(&ahci_pci_driver);
2369 }
2370
2371
2372 MODULE_AUTHOR("Jeff Garzik");
2373 MODULE_DESCRIPTION("AHCI SATA low-level driver");
2374 MODULE_LICENSE("GPL");
2375 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
2376 MODULE_VERSION(DRV_VERSION);
2377
2378 module_init(ahci_init);
2379 module_exit(ahci_exit);