libata/ahci: enclosure management support
drivers/ata/ahci.c
1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48
49 #define DRV_NAME "ahci"
50 #define DRV_VERSION "3.0"
51
52 static int ahci_skip_host_reset;
53 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
54 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
55
56 static int ahci_enable_alpm(struct ata_port *ap,
57 enum link_pm policy);
58 static void ahci_disable_alpm(struct ata_port *ap);
59 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
60 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
61 size_t size);
62 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
63 ssize_t size);
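/* Enclosure management keeps one LED state slot per device position behind a
 * port, i.e. one per port multiplier slot (see em_priv[] in ahci_port_priv).
 */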
64 #define MAX_SLOTS 8
65
66 enum {
67 AHCI_PCI_BAR = 5,
68 AHCI_MAX_PORTS = 32,
69 AHCI_MAX_SG = 168, /* hardware max is 64K */
70 AHCI_DMA_BOUNDARY = 0xffffffff,
71 AHCI_MAX_CMDS = 32,
72 AHCI_CMD_SZ = 32,
73 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
74 AHCI_RX_FIS_SZ = 256,
75 AHCI_CMD_TBL_CDB = 0x40,
76 AHCI_CMD_TBL_HDR_SZ = 0x80,
77 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
78 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
79 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
80 AHCI_RX_FIS_SZ,
81 AHCI_IRQ_ON_SG = (1 << 31),
82 AHCI_CMD_ATAPI = (1 << 5),
83 AHCI_CMD_WRITE = (1 << 6),
84 AHCI_CMD_PREFETCH = (1 << 7),
85 AHCI_CMD_RESET = (1 << 8),
86 AHCI_CMD_CLR_BUSY = (1 << 10),
87
88 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
89 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
90 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
91
92 board_ahci = 0,
93 board_ahci_vt8251 = 1,
94 board_ahci_ign_iferr = 2,
95 board_ahci_sb600 = 3,
96 board_ahci_mv = 4,
97 board_ahci_sb700 = 5,
98 board_ahci_mcp65 = 6,
99 board_ahci_nopmp = 7,
100
101 /* global controller registers */
102 HOST_CAP = 0x00, /* host capabilities */
103 HOST_CTL = 0x04, /* global host control */
104 HOST_IRQ_STAT = 0x08, /* interrupt status */
105 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
106 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
107 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
108 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
109
110 /* HOST_CTL bits */
111 HOST_RESET = (1 << 0), /* reset controller; self-clear */
112 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
113 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
114
115 /* HOST_CAP bits */
116 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
117 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
118 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
119 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
120 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
121 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
122 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
123 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
124 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
125
126 /* registers for each SATA port */
127 PORT_LST_ADDR = 0x00, /* command list DMA addr */
128 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
129 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
130 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
131 PORT_IRQ_STAT = 0x10, /* interrupt status */
132 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
133 PORT_CMD = 0x18, /* port command */
134 PORT_TFDATA = 0x20, /* taskfile data */
135 PORT_SIG = 0x24, /* device TF signature */
136 PORT_CMD_ISSUE = 0x38, /* command issue */
137 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
138 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
139 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
140 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
141 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
142
143 /* PORT_IRQ_{STAT,MASK} bits */
144 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
145 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
146 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
147 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
148 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
149 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
150 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
151 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
152
153 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
154 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
155 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
156 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
157 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
158 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
159 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
160 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
161 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
162
163 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
164 PORT_IRQ_IF_ERR |
165 PORT_IRQ_CONNECT |
166 PORT_IRQ_PHYRDY |
167 PORT_IRQ_UNK_FIS |
168 PORT_IRQ_BAD_PMP,
169 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
170 PORT_IRQ_TF_ERR |
171 PORT_IRQ_HBUS_DATA_ERR,
172 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
173 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
174 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
175
176 /* PORT_CMD bits */
177 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
178 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
179 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
180 PORT_CMD_PMP = (1 << 17), /* PMP attached */
181 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
182 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
183 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
184 PORT_CMD_CLO = (1 << 3), /* Command list override */
185 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
186 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
187 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
188
189 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
190 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
191 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
192 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
193
194 /* hpriv->flags bits */
195 AHCI_HFLAG_NO_NCQ = (1 << 0),
196 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
197 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
198 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
199 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
200 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
201 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
202 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
203 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
204 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
205
206 /* ap->flags bits */
207
208 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
209 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
210 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
211 ATA_FLAG_IPM,
212
213 ICH_MAP = 0x90, /* ICH MAP register */
214
215 /* em_ctl bits */
216 EM_CTL_RST = (1 << 9), /* Reset */
217 EM_CTL_TM = (1 << 8), /* Transmit Message */
218 EM_CTL_ALHD = (1 << 26), /* Activity LED */
219 };
220
221 struct ahci_cmd_hdr {
222 __le32 opts;
223 __le32 status;
224 __le32 tbl_addr;
225 __le32 tbl_addr_hi;
226 __le32 reserved[4];
227 };
228
229 struct ahci_sg {
230 __le32 addr;
231 __le32 addr_hi;
232 __le32 reserved;
233 __le32 flags_size;
234 };
235
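/* Per-slot enclosure management state: led_state caches the last LED message
 * sent for the slot, while timer/activity/saved_activity drive the software
 * activity blinking implemented in ahci_sw_activity_blink().
 */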
236 struct ahci_em_priv {
237 enum sw_activity blink_policy;
238 struct timer_list timer;
239 unsigned long saved_activity;
240 unsigned long activity;
241 unsigned long led_state;
242 };
243
244 struct ahci_host_priv {
245 unsigned int flags; /* AHCI_HFLAG_* */
246 u32 cap; /* cap to use */
247 u32 port_map; /* port map to use */
248 u32 saved_cap; /* saved initial cap */
249 u32 saved_port_map; /* saved initial port_map */
250 u32 em_loc; /* enclosure management location */
251 };
252
253 struct ahci_port_priv {
254 struct ata_link *active_link;
255 struct ahci_cmd_hdr *cmd_slot;
256 dma_addr_t cmd_slot_dma;
257 void *cmd_tbl;
258 dma_addr_t cmd_tbl_dma;
259 void *rx_fis;
260 dma_addr_t rx_fis_dma;
261 /* for NCQ spurious interrupt analysis */
262 unsigned int ncq_saw_d2h:1;
263 unsigned int ncq_saw_dmas:1;
264 unsigned int ncq_saw_sdb:1;
265 u32 intr_mask; /* interrupts to enable */
266 struct ahci_em_priv em_priv[MAX_SLOTS];/* enclosure management info
267 * per PM slot */
268 };
269
270 static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
271 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
272 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
273 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
274 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
275 static int ahci_port_start(struct ata_port *ap);
276 static void ahci_port_stop(struct ata_port *ap);
277 static void ahci_qc_prep(struct ata_queued_cmd *qc);
278 static void ahci_freeze(struct ata_port *ap);
279 static void ahci_thaw(struct ata_port *ap);
280 static void ahci_pmp_attach(struct ata_port *ap);
281 static void ahci_pmp_detach(struct ata_port *ap);
282 static int ahci_softreset(struct ata_link *link, unsigned int *class,
283 unsigned long deadline);
284 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
285 unsigned long deadline);
286 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
287 unsigned long deadline);
288 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
289 unsigned long deadline);
290 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
291 unsigned long deadline);
292 static void ahci_postreset(struct ata_link *link, unsigned int *class);
293 static void ahci_error_handler(struct ata_port *ap);
294 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
295 static int ahci_port_resume(struct ata_port *ap);
296 static void ahci_dev_config(struct ata_device *dev);
297 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
298 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
299 u32 opts);
300 #ifdef CONFIG_PM
301 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
302 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
303 static int ahci_pci_device_resume(struct pci_dev *pdev);
304 #endif
305 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
306 static ssize_t ahci_activity_store(struct ata_device *dev,
307 enum sw_activity val);
308 static void ahci_init_sw_activity(struct ata_link *link);
309
310 static struct device_attribute *ahci_shost_attrs[] = {
311 &dev_attr_link_power_management_policy,
312 &dev_attr_em_message_type,
313 &dev_attr_em_message,
314 NULL
315 };
316
317 static struct device_attribute *ahci_sdev_attrs[] = {
318 &dev_attr_sw_activity,
319 NULL
320 };
321
322 static struct scsi_host_template ahci_sht = {
323 ATA_NCQ_SHT(DRV_NAME),
324 .can_queue = AHCI_MAX_CMDS - 1,
325 .sg_tablesize = AHCI_MAX_SG,
326 .dma_boundary = AHCI_DMA_BOUNDARY,
327 .shost_attrs = ahci_shost_attrs,
328 .sdev_attrs = ahci_sdev_attrs,
329 };
330
331 static struct ata_port_operations ahci_ops = {
332 .inherits = &sata_pmp_port_ops,
333
334 .qc_defer = sata_pmp_qc_defer_cmd_switch,
335 .qc_prep = ahci_qc_prep,
336 .qc_issue = ahci_qc_issue,
337 .qc_fill_rtf = ahci_qc_fill_rtf,
338
339 .freeze = ahci_freeze,
340 .thaw = ahci_thaw,
341 .softreset = ahci_softreset,
342 .hardreset = ahci_hardreset,
343 .postreset = ahci_postreset,
344 .pmp_softreset = ahci_softreset,
345 .error_handler = ahci_error_handler,
346 .post_internal_cmd = ahci_post_internal_cmd,
347 .dev_config = ahci_dev_config,
348
349 .scr_read = ahci_scr_read,
350 .scr_write = ahci_scr_write,
351 .pmp_attach = ahci_pmp_attach,
352 .pmp_detach = ahci_pmp_detach,
353
354 .enable_pm = ahci_enable_alpm,
355 .disable_pm = ahci_disable_alpm,
356 .em_show = ahci_led_show,
357 .em_store = ahci_led_store,
358 .sw_activity_show = ahci_activity_show,
359 .sw_activity_store = ahci_activity_store,
360 #ifdef CONFIG_PM
361 .port_suspend = ahci_port_suspend,
362 .port_resume = ahci_port_resume,
363 #endif
364 .port_start = ahci_port_start,
365 .port_stop = ahci_port_stop,
366 };
367
368 static struct ata_port_operations ahci_vt8251_ops = {
369 .inherits = &ahci_ops,
370 .hardreset = ahci_vt8251_hardreset,
371 };
372
373 static struct ata_port_operations ahci_p5wdh_ops = {
374 .inherits = &ahci_ops,
375 .hardreset = ahci_p5wdh_hardreset,
376 };
377
378 static struct ata_port_operations ahci_sb600_ops = {
379 .inherits = &ahci_ops,
380 .softreset = ahci_sb600_softreset,
381 .pmp_softreset = ahci_sb600_softreset,
382 };
383
384 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
385
386 static const struct ata_port_info ahci_port_info[] = {
387 /* board_ahci */
388 {
389 .flags = AHCI_FLAG_COMMON,
390 .pio_mask = 0x1f, /* pio0-4 */
391 .udma_mask = ATA_UDMA6,
392 .port_ops = &ahci_ops,
393 },
394 /* board_ahci_vt8251 */
395 {
396 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
397 .flags = AHCI_FLAG_COMMON,
398 .pio_mask = 0x1f, /* pio0-4 */
399 .udma_mask = ATA_UDMA6,
400 .port_ops = &ahci_vt8251_ops,
401 },
402 /* board_ahci_ign_iferr */
403 {
404 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
405 .flags = AHCI_FLAG_COMMON,
406 .pio_mask = 0x1f, /* pio0-4 */
407 .udma_mask = ATA_UDMA6,
408 .port_ops = &ahci_ops,
409 },
410 /* board_ahci_sb600 */
411 {
412 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
413 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
414 AHCI_HFLAG_SECT255),
415 .flags = AHCI_FLAG_COMMON,
416 .pio_mask = 0x1f, /* pio0-4 */
417 .udma_mask = ATA_UDMA6,
418 .port_ops = &ahci_sb600_ops,
419 },
420 /* board_ahci_mv */
421 {
422 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
423 AHCI_HFLAG_MV_PATA),
424 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
425 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
426 .pio_mask = 0x1f, /* pio0-4 */
427 .udma_mask = ATA_UDMA6,
428 .port_ops = &ahci_ops,
429 },
430 /* board_ahci_sb700 */
431 {
432 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
433 .flags = AHCI_FLAG_COMMON,
434 .pio_mask = 0x1f, /* pio0-4 */
435 .udma_mask = ATA_UDMA6,
436 .port_ops = &ahci_sb600_ops,
437 },
438 /* board_ahci_mcp65 */
439 {
440 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
441 .flags = AHCI_FLAG_COMMON,
442 .pio_mask = 0x1f, /* pio0-4 */
443 .udma_mask = ATA_UDMA6,
444 .port_ops = &ahci_ops,
445 },
446 /* board_ahci_nopmp */
447 {
448 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
449 .flags = AHCI_FLAG_COMMON,
450 .pio_mask = 0x1f, /* pio0-4 */
451 .udma_mask = ATA_UDMA6,
452 .port_ops = &ahci_ops,
453 },
454 };
455
456 static const struct pci_device_id ahci_pci_tbl[] = {
457 /* Intel */
458 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
459 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
460 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
461 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
462 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
463 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
464 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
465 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
466 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
467 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
468 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
469 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
470 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
471 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
472 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
473 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
474 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
475 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
476 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
477 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
478 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
479 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
480 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
481 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
482 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
483 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
484 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
485 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
486 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
487 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
488 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
489
490 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
491 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
492 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
493
494 /* ATI */
495 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
496 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
497 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
498 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
499 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
500 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
501 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
502
503 /* VIA */
504 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
505 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
506
507 /* NVIDIA */
508 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
509 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
510 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
511 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
512 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
513 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
514 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
515 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
516 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
517 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
518 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
519 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
520 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
521 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
522 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
523 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
524 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
525 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
526 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
527 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
528 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
529 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
530 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
531 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
532 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
533 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
534 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
535 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
536 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
537 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
538 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
539 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
540 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
541 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
542 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
543 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
544 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
545 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
546 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
547 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
548 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
549 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
550 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
551 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
552 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
553 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
554 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
555 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
556 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
557 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
558 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
559 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
560 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
561 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
562 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
563 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
564 { PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */
565 { PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */
566 { PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */
567 { PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */
568 { PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */
569 { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
570 { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
571 { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
572 { PCI_VDEVICE(NVIDIA, 0x0bc4), board_ahci }, /* MCP7B */
573 { PCI_VDEVICE(NVIDIA, 0x0bc5), board_ahci }, /* MCP7B */
574 { PCI_VDEVICE(NVIDIA, 0x0bc6), board_ahci }, /* MCP7B */
575 { PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci }, /* MCP7B */
576
577 /* SiS */
578 { PCI_VDEVICE(SI, 0x1184), board_ahci_nopmp }, /* SiS 966 */
579 { PCI_VDEVICE(SI, 0x1185), board_ahci_nopmp }, /* SiS 968 */
580 { PCI_VDEVICE(SI, 0x0186), board_ahci_nopmp }, /* SiS 968 */
581
582 /* Marvell */
583 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
584 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
585
586 /* Generic, PCI class code for AHCI */
587 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
588 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
589
590 { } /* terminate list */
591 };
592
593
594 static struct pci_driver ahci_pci_driver = {
595 .name = DRV_NAME,
596 .id_table = ahci_pci_tbl,
597 .probe = ahci_init_one,
598 .remove = ata_pci_remove_one,
599 #ifdef CONFIG_PM
600 .suspend = ahci_pci_device_suspend,
601 .resume = ahci_pci_device_resume,
602 #endif
603 };
604
605 static int ahci_em_messages = 1;
606 module_param(ahci_em_messages, int, 0444);
607 /* add other LED protocol types when they become supported */
608 MODULE_PARM_DESC(ahci_em_messages,
609 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
610
611 static inline int ahci_nr_ports(u32 cap)
612 {
613 return (cap & 0x1f) + 1;
614 }
615
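/* Port register banks follow the global registers in the AHCI BAR:
 * port N starts at offset 0x100 + N * 0x80.
 */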
616 static inline void __iomem *__ahci_port_base(struct ata_host *host,
617 unsigned int port_no)
618 {
619 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
620
621 return mmio + 0x100 + (port_no * 0x80);
622 }
623
624 static inline void __iomem *ahci_port_base(struct ata_port *ap)
625 {
626 return __ahci_port_base(ap->host, ap->port_no);
627 }
628
629 static void ahci_enable_ahci(void __iomem *mmio)
630 {
631 int i;
632 u32 tmp;
633
634 /* turn on AHCI_EN */
635 tmp = readl(mmio + HOST_CTL);
636 if (tmp & HOST_AHCI_EN)
637 return;
638
639 /* Some controllers need AHCI_EN to be written multiple times.
640 * Try a few times before giving up.
641 */
642 for (i = 0; i < 5; i++) {
643 tmp |= HOST_AHCI_EN;
644 writel(tmp, mmio + HOST_CTL);
645 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
646 if (tmp & HOST_AHCI_EN)
647 return;
648 msleep(10);
649 }
650
651 WARN_ON(1);
652 }
653
654 /**
655 * ahci_save_initial_config - Save and fixup initial config values
656 * @pdev: target PCI device
657 * @hpriv: host private area to store config values
658 *
659 * Some registers containing configuration info might be setup by
660 * BIOS and might be cleared on reset. This function saves the
661 * initial values of those registers into @hpriv such that they
662 * can be restored after controller reset.
663 *
664 * If inconsistent, config values are fixed up by this function.
665 *
666 * LOCKING:
667 * None.
668 */
669 static void ahci_save_initial_config(struct pci_dev *pdev,
670 struct ahci_host_priv *hpriv)
671 {
672 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
673 u32 cap, port_map;
674 int i;
675 int mv;
676
677 /* make sure AHCI mode is enabled before accessing CAP */
678 ahci_enable_ahci(mmio);
679
680 /* Values prefixed with saved_ are written back to host after
681 * reset. Values without are used for driver operation.
682 */
683 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
684 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
685
686 /* some chips have errata preventing 64bit use */
687 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
688 dev_printk(KERN_INFO, &pdev->dev,
689 "controller can't do 64bit DMA, forcing 32bit\n");
690 cap &= ~HOST_CAP_64;
691 }
692
693 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
694 dev_printk(KERN_INFO, &pdev->dev,
695 "controller can't do NCQ, turning off CAP_NCQ\n");
696 cap &= ~HOST_CAP_NCQ;
697 }
698
699 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
700 dev_printk(KERN_INFO, &pdev->dev,
701 "controller can do NCQ, turning on CAP_NCQ\n");
702 cap |= HOST_CAP_NCQ;
703 }
704
705 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
706 dev_printk(KERN_INFO, &pdev->dev,
707 "controller can't do PMP, turning off CAP_PMP\n");
708 cap &= ~HOST_CAP_PMP;
709 }
710
711 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
712 port_map != 1) {
713 dev_printk(KERN_INFO, &pdev->dev,
714 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
715 port_map, 1);
716 port_map = 1;
717 }
718
719 /*
720 * Temporary Marvell 6145 hack: PATA port presence
721 * is asserted through the standard AHCI port
722 * presence register, as bit 4 (counting from 0)
723 */
724 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
725 if (pdev->device == 0x6121)
726 mv = 0x3;
727 else
728 mv = 0xf;
729 dev_printk(KERN_ERR, &pdev->dev,
730 "MV_AHCI HACK: port_map %x -> %x\n",
731 port_map,
732 port_map & mv);
733
734 port_map &= mv;
735 }
736
737 /* cross check port_map and cap.n_ports */
738 if (port_map) {
739 int map_ports = 0;
740
741 for (i = 0; i < AHCI_MAX_PORTS; i++)
742 if (port_map & (1 << i))
743 map_ports++;
744
745 /* If PI has more ports than n_ports, whine, clear
746 * port_map and let it be generated from n_ports.
747 */
748 if (map_ports > ahci_nr_ports(cap)) {
749 dev_printk(KERN_WARNING, &pdev->dev,
750 "implemented port map (0x%x) contains more "
751 "ports than nr_ports (%u), using nr_ports\n",
752 port_map, ahci_nr_ports(cap));
753 port_map = 0;
754 }
755 }
756
757 /* fabricate port_map from cap.nr_ports */
758 if (!port_map) {
759 port_map = (1 << ahci_nr_ports(cap)) - 1;
760 dev_printk(KERN_WARNING, &pdev->dev,
761 "forcing PORTS_IMPL to 0x%x\n", port_map);
762
763 /* write the fixed up value to the PI register */
764 hpriv->saved_port_map = port_map;
765 }
766
767 /* record values to use during operation */
768 hpriv->cap = cap;
769 hpriv->port_map = port_map;
770 }
771
772 /**
773 * ahci_restore_initial_config - Restore initial config
774 * @host: target ATA host
775 *
776 * Restore initial config stored by ahci_save_initial_config().
777 *
778 * LOCKING:
779 * None.
780 */
781 static void ahci_restore_initial_config(struct ata_host *host)
782 {
783 struct ahci_host_priv *hpriv = host->private_data;
784 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
785
786 writel(hpriv->saved_cap, mmio + HOST_CAP);
787 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
788 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
789 }
790
791 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
792 {
793 static const int offset[] = {
794 [SCR_STATUS] = PORT_SCR_STAT,
795 [SCR_CONTROL] = PORT_SCR_CTL,
796 [SCR_ERROR] = PORT_SCR_ERR,
797 [SCR_ACTIVE] = PORT_SCR_ACT,
798 [SCR_NOTIFICATION] = PORT_SCR_NTF,
799 };
800 struct ahci_host_priv *hpriv = ap->host->private_data;
801
802 if (sc_reg < ARRAY_SIZE(offset) &&
803 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
804 return offset[sc_reg];
805 return 0;
806 }
807
808 static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
809 {
810 void __iomem *port_mmio = ahci_port_base(ap);
811 int offset = ahci_scr_offset(ap, sc_reg);
812
813 if (offset) {
814 *val = readl(port_mmio + offset);
815 return 0;
816 }
817 return -EINVAL;
818 }
819
820 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
821 {
822 void __iomem *port_mmio = ahci_port_base(ap);
823 int offset = ahci_scr_offset(ap, sc_reg);
824
825 if (offset) {
826 writel(val, port_mmio + offset);
827 return 0;
828 }
829 return -EINVAL;
830 }
831
832 static void ahci_start_engine(struct ata_port *ap)
833 {
834 void __iomem *port_mmio = ahci_port_base(ap);
835 u32 tmp;
836
837 /* start DMA */
838 tmp = readl(port_mmio + PORT_CMD);
839 tmp |= PORT_CMD_START;
840 writel(tmp, port_mmio + PORT_CMD);
841 readl(port_mmio + PORT_CMD); /* flush */
842 }
843
844 static int ahci_stop_engine(struct ata_port *ap)
845 {
846 void __iomem *port_mmio = ahci_port_base(ap);
847 u32 tmp;
848
849 tmp = readl(port_mmio + PORT_CMD);
850
851 /* check if the HBA is idle */
852 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
853 return 0;
854
855 /* setting HBA to idle */
856 tmp &= ~PORT_CMD_START;
857 writel(tmp, port_mmio + PORT_CMD);
858
859 /* wait for engine to stop. This could be as long as 500 msec */
860 tmp = ata_wait_register(port_mmio + PORT_CMD,
861 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
862 if (tmp & PORT_CMD_LIST_ON)
863 return -EIO;
864
865 return 0;
866 }
867
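/* Program the command list and received-FIS DMA addresses (split into low and
 * high dwords when the HBA supports 64-bit addressing) and turn on FIS
 * reception so the port can latch incoming FISes.
 */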
868 static void ahci_start_fis_rx(struct ata_port *ap)
869 {
870 void __iomem *port_mmio = ahci_port_base(ap);
871 struct ahci_host_priv *hpriv = ap->host->private_data;
872 struct ahci_port_priv *pp = ap->private_data;
873 u32 tmp;
874
875 /* set FIS registers */
876 if (hpriv->cap & HOST_CAP_64)
877 writel((pp->cmd_slot_dma >> 16) >> 16,
878 port_mmio + PORT_LST_ADDR_HI);
879 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
880
881 if (hpriv->cap & HOST_CAP_64)
882 writel((pp->rx_fis_dma >> 16) >> 16,
883 port_mmio + PORT_FIS_ADDR_HI);
884 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
885
886 /* enable FIS reception */
887 tmp = readl(port_mmio + PORT_CMD);
888 tmp |= PORT_CMD_FIS_RX;
889 writel(tmp, port_mmio + PORT_CMD);
890
891 /* flush */
892 readl(port_mmio + PORT_CMD);
893 }
894
895 static int ahci_stop_fis_rx(struct ata_port *ap)
896 {
897 void __iomem *port_mmio = ahci_port_base(ap);
898 u32 tmp;
899
900 /* disable FIS reception */
901 tmp = readl(port_mmio + PORT_CMD);
902 tmp &= ~PORT_CMD_FIS_RX;
903 writel(tmp, port_mmio + PORT_CMD);
904
905 /* wait for completion, spec says 500ms, give it 1000 */
906 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
907 PORT_CMD_FIS_ON, 10, 1000);
908 if (tmp & PORT_CMD_FIS_ON)
909 return -EBUSY;
910
911 return 0;
912 }
913
914 static void ahci_power_up(struct ata_port *ap)
915 {
916 struct ahci_host_priv *hpriv = ap->host->private_data;
917 void __iomem *port_mmio = ahci_port_base(ap);
918 u32 cmd;
919
920 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
921
922 /* spin up device */
923 if (hpriv->cap & HOST_CAP_SSS) {
924 cmd |= PORT_CMD_SPIN_UP;
925 writel(cmd, port_mmio + PORT_CMD);
926 }
927
928 /* wake up link */
929 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
930 }
931
932 static void ahci_disable_alpm(struct ata_port *ap)
933 {
934 struct ahci_host_priv *hpriv = ap->host->private_data;
935 void __iomem *port_mmio = ahci_port_base(ap);
936 u32 cmd;
937 struct ahci_port_priv *pp = ap->private_data;
938
939 /* IPM bits should be disabled by libata-core */
940 /* get the existing command bits */
941 cmd = readl(port_mmio + PORT_CMD);
942
943 /* disable ALPM and ASP */
944 cmd &= ~PORT_CMD_ASP;
945 cmd &= ~PORT_CMD_ALPE;
946
947 /* force the interface back to active */
948 cmd |= PORT_CMD_ICC_ACTIVE;
949
950 /* write out new cmd value */
951 writel(cmd, port_mmio + PORT_CMD);
952 cmd = readl(port_mmio + PORT_CMD);
953
954 /* wait 10ms to be sure we've come out of any low power state */
955 msleep(10);
956
957 /* clear out any PhyRdy stuff from interrupt status */
958 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
959
960 /* go ahead and clean out PhyRdy Change from Serror too */
961 ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
962
963 /*
964 * Clear flag to indicate that we should ignore all PhyRdy
965 * state changes
966 */
967 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
968
969 /*
970 * Enable interrupts on Phy Ready.
971 */
972 pp->intr_mask |= PORT_IRQ_PHYRDY;
973 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
974
975 /*
976 * don't change the link pm policy - we can be called
977 * just to turn off link pm temporarily
978 */
979 }
980
981 static int ahci_enable_alpm(struct ata_port *ap,
982 enum link_pm policy)
983 {
984 struct ahci_host_priv *hpriv = ap->host->private_data;
985 void __iomem *port_mmio = ahci_port_base(ap);
986 u32 cmd;
987 struct ahci_port_priv *pp = ap->private_data;
988 u32 asp;
989
990 /* Make sure the host is capable of link power management */
991 if (!(hpriv->cap & HOST_CAP_ALPM))
992 return -EINVAL;
993
994 switch (policy) {
995 case MAX_PERFORMANCE:
996 case NOT_AVAILABLE:
997 /*
998 * if we came here with NOT_AVAILABLE,
999 * it just means this is the first time we
1000 * have tried to enable - default to max performance,
1001 * and let the user go to lower power modes on request.
1002 */
1003 ahci_disable_alpm(ap);
1004 return 0;
1005 case MIN_POWER:
1006 /* configure HBA to enter SLUMBER */
1007 asp = PORT_CMD_ASP;
1008 break;
1009 case MEDIUM_POWER:
1010 /* configure HBA to enter PARTIAL */
1011 asp = 0;
1012 break;
1013 default:
1014 return -EINVAL;
1015 }
1016
1017 /*
1018 * Disable interrupts on Phy Ready. This keeps us from
1019 * getting woken up due to spurious phy ready interrupts
1020 * TBD - Hot plug should be done via polling now, is
1021 * that even supported?
1022 */
1023 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1024 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1025
1026 /*
1027 * Set a flag to indicate that we should ignore all PhyRdy
1028 * state changes since these can happen now whenever we
1029 * change link state
1030 */
1031 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1032
1033 /* get the existing command bits */
1034 cmd = readl(port_mmio + PORT_CMD);
1035
1036 /*
1037 * Set ASP based on Policy
1038 */
1039 cmd |= asp;
1040
1041 /*
1042 * Setting this bit will instruct the HBA to aggressively
1043 * enter a lower power link state when it's appropriate and
1044 * based on the value set above for ASP
1045 */
1046 cmd |= PORT_CMD_ALPE;
1047
1048 /* write out new cmd value */
1049 writel(cmd, port_mmio + PORT_CMD);
1050 cmd = readl(port_mmio + PORT_CMD);
1051
1052 /* IPM bits should be set by libata-core */
1053 return 0;
1054 }
1055
1056 #ifdef CONFIG_PM
1057 static void ahci_power_down(struct ata_port *ap)
1058 {
1059 struct ahci_host_priv *hpriv = ap->host->private_data;
1060 void __iomem *port_mmio = ahci_port_base(ap);
1061 u32 cmd, scontrol;
1062
1063 if (!(hpriv->cap & HOST_CAP_SSS))
1064 return;
1065
1066 /* put device into listen mode, first set PxSCTL.DET to 0 */
1067 scontrol = readl(port_mmio + PORT_SCR_CTL);
1068 scontrol &= ~0xf;
1069 writel(scontrol, port_mmio + PORT_SCR_CTL);
1070
1071 /* then set PxCMD.SUD to 0 */
1072 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1073 cmd &= ~PORT_CMD_SPIN_UP;
1074 writel(cmd, port_mmio + PORT_CMD);
1075 }
1076 #endif
1077
1078 static void ahci_start_port(struct ata_port *ap)
1079 {
1080 struct ahci_port_priv *pp = ap->private_data;
1081 struct ata_link *link;
1082 struct ahci_em_priv *emp;
1083
1084 /* enable FIS reception */
1085 ahci_start_fis_rx(ap);
1086
1087 /* enable DMA */
1088 ahci_start_engine(ap);
1089
1090 /* turn on LEDs */
1091 if (ap->flags & ATA_FLAG_EM) {
1092 ata_port_for_each_link(link, ap) {
1093 emp = &pp->em_priv[link->pmp];
1094 ahci_transmit_led_message(ap, emp->led_state, 4);
1095 }
1096 }
1097
1098 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1099 ata_port_for_each_link(link, ap)
1100 ahci_init_sw_activity(link);
1101
1102 }
1103
1104 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1105 {
1106 int rc;
1107
1108 /* disable DMA */
1109 rc = ahci_stop_engine(ap);
1110 if (rc) {
1111 *emsg = "failed to stop engine";
1112 return rc;
1113 }
1114
1115 /* disable FIS reception */
1116 rc = ahci_stop_fis_rx(ap);
1117 if (rc) {
1118 *emsg = "failed stop FIS RX";
1119 return rc;
1120 }
1121
1122 return 0;
1123 }
1124
1125 static int ahci_reset_controller(struct ata_host *host)
1126 {
1127 struct pci_dev *pdev = to_pci_dev(host->dev);
1128 struct ahci_host_priv *hpriv = host->private_data;
1129 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1130 u32 tmp;
1131
1132 /* we must be in AHCI mode, before using anything
1133 * AHCI-specific, such as HOST_RESET.
1134 */
1135 ahci_enable_ahci(mmio);
1136
1137 /* global controller reset */
1138 if (!ahci_skip_host_reset) {
1139 tmp = readl(mmio + HOST_CTL);
1140 if ((tmp & HOST_RESET) == 0) {
1141 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1142 readl(mmio + HOST_CTL); /* flush */
1143 }
1144
1145 /* reset must complete within 1 second, or
1146 * the hardware should be considered fried.
1147 */
1148 ssleep(1);
1149
1150 tmp = readl(mmio + HOST_CTL);
1151 if (tmp & HOST_RESET) {
1152 dev_printk(KERN_ERR, host->dev,
1153 "controller reset failed (0x%x)\n", tmp);
1154 return -EIO;
1155 }
1156
1157 /* turn on AHCI mode */
1158 ahci_enable_ahci(mmio);
1159
1160 /* Some registers might be cleared on reset. Restore
1161 * initial values.
1162 */
1163 ahci_restore_initial_config(host);
1164 } else
1165 dev_printk(KERN_INFO, host->dev,
1166 "skipping global host reset\n");
1167
1168 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1169 u16 tmp16;
1170
1171 /* configure PCS */
1172 pci_read_config_word(pdev, 0x92, &tmp16);
1173 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1174 tmp16 |= hpriv->port_map;
1175 pci_write_config_word(pdev, 0x92, tmp16);
1176 }
1177 }
1178
1179 return 0;
1180 }
1181
1182 static void ahci_sw_activity(struct ata_link *link)
1183 {
1184 struct ata_port *ap = link->ap;
1185 struct ahci_port_priv *pp = ap->private_data;
1186 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1187
1188 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1189 return;
1190
1191 emp->activity++;
1192 if (!timer_pending(&emp->timer))
1193 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1194 }
1195
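/* Timer callback for software activity blinking: while fresh activity keeps
 * arriving, toggle the activity LED (bit 16 of the LED message) and re-arm
 * the timer every 100 ms; otherwise park the LED in the idle state selected
 * by blink_policy.
 */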
1196 static void ahci_sw_activity_blink(unsigned long arg)
1197 {
1198 struct ata_link *link = (struct ata_link *)arg;
1199 struct ata_port *ap = link->ap;
1200 struct ahci_port_priv *pp = ap->private_data;
1201 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1202 unsigned long led_message = emp->led_state;
1203 u32 activity_led_state;
1204
1205 led_message &= 0xffff0000;
1206 led_message |= ap->port_no | (link->pmp << 8);
1207
1208 /* check to see if we've had activity. If so,
1209 * toggle state of LED and reset timer. If not,
1210 * turn LED to desired idle state.
1211 */
1212 if (emp->saved_activity != emp->activity) {
1213 emp->saved_activity = emp->activity;
1214 /* get the current LED state */
1215 activity_led_state = led_message & 0x00010000;
1216
1217 if (activity_led_state)
1218 activity_led_state = 0;
1219 else
1220 activity_led_state = 1;
1221
1222 /* clear old state */
1223 led_message &= 0xfff8ffff;
1224
1225 /* toggle state */
1226 led_message |= (activity_led_state << 16);
1227 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1228 } else {
1229 /* switch to idle */
1230 led_message &= 0xfff8ffff;
1231 if (emp->blink_policy == BLINK_OFF)
1232 led_message |= (1 << 16);
1233 }
1234 ahci_transmit_led_message(ap, led_message, 4);
1235 }
1236
1237 static void ahci_init_sw_activity(struct ata_link *link)
1238 {
1239 struct ata_port *ap = link->ap;
1240 struct ahci_port_priv *pp = ap->private_data;
1241 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1242
1243 /* init activity stats, setup timer */
1244 emp->saved_activity = emp->activity = 0;
1245 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1246
1247 /* check our blink policy and set flag for link if it's enabled */
1248 if (emp->blink_policy)
1249 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1250 }
1251
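/* Reset the enclosure management message logic; refuse if a transmit or an
 * earlier reset is still in progress.
 */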
1252 static int ahci_reset_em(struct ata_host *host)
1253 {
1254 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1255 u32 em_ctl;
1256
1257 em_ctl = readl(mmio + HOST_EM_CTL);
1258 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1259 return -EINVAL;
1260
1261 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1262 return 0;
1263 }
1264
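/* Transmit a LED message through the enclosure management transmit buffer at
 * ABAR offset hpriv->em_loc.  As used by this driver, the state word carries
 * the port number in its low bits, the port multiplier slot (selecting
 * pp->em_priv[]) in bits 8-15 and the LED states in bits 16-18, bit 16 being
 * the activity LED; message[0] holds only the 4-byte message size.
 */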
1265 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1266 ssize_t size)
1267 {
1268 struct ahci_host_priv *hpriv = ap->host->private_data;
1269 struct ahci_port_priv *pp = ap->private_data;
1270 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1271 u32 em_ctl;
1272 u32 message[] = {0, 0};
1273 unsigned long flags; /* spin_lock_irqsave() needs unsigned long */
1274 int pmp;
1275 struct ahci_em_priv *emp;
1276
1277 /* get the slot number from the message */
1278 pmp = (state & 0x0000ff00) >> 8;
1279 if (pmp < MAX_SLOTS)
1280 emp = &pp->em_priv[pmp];
1281 else
1282 return -EINVAL;
1283
1284 spin_lock_irqsave(ap->lock, flags);
1285
1286 /*
1287 * if we are still busy transmitting a previous message,
1288 * do not allow a new one to be queued
1289 */
1290 em_ctl = readl(mmio + HOST_EM_CTL);
1291 if (em_ctl & EM_CTL_TM) {
1292 spin_unlock_irqrestore(ap->lock, flags);
1293 return -EINVAL;
1294 }
1295
1296 /*
1297 * create message header - this is all zero except for
1298 * the message size, which is 4 bytes.
1299 */
1300 message[0] |= (4 << 8);
1301
1302 /* ignore bits 0:3 of byte zero, fill in port info yourself */
1303 message[1] = ((state & 0xfffffff0) | ap->port_no);
1304
1305 /* write message to EM_LOC */
1306 writel(message[0], mmio + hpriv->em_loc);
1307 writel(message[1], mmio + hpriv->em_loc+4);
1308
1309 /* save off new led state for port/slot */
1310 emp->led_state = message[1];
1311
1312 /*
1313 * tell hardware to transmit the message
1314 */
1315 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1316
1317 spin_unlock_irqrestore(ap->lock, flags);
1318 return size;
1319 }
1320
1321 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1322 {
1323 struct ahci_port_priv *pp = ap->private_data;
1324 struct ata_link *link;
1325 struct ahci_em_priv *emp;
1326 int rc = 0;
1327
1328 ata_port_for_each_link(link, ap) {
1329 emp = &pp->em_priv[link->pmp];
1330 rc += sprintf(buf + rc, "%lx\n", emp->led_state);
1331 }
1332 return rc;
1333 }
1334
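/* sysfs em_message store: the user-supplied value uses the same layout as
 * ahci_transmit_led_message(); the activity/LED state bits are masked off
 * while software activity blinking owns that slot's LED.
 */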
1335 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1336 size_t size)
1337 {
1338 int state;
1339 int pmp;
1340 struct ahci_port_priv *pp = ap->private_data;
1341 struct ahci_em_priv *emp;
1342
1343 state = simple_strtoul(buf, NULL, 0);
1344
1345 /* get the slot number from the message */
1346 pmp = (state & 0x0000ff00) >> 8;
1347 if (pmp < MAX_SLOTS)
1348 emp = &pp->em_priv[pmp];
1349 else
1350 return -EINVAL;
1351
1352 /* mask off the activity bits if we are in sw_activity
1353 * mode, user should turn off sw_activity before setting
1354 * activity led through em_message
1355 */
1356 if (emp->blink_policy)
1357 state &= 0xfff8ffff;
1358
1359 return ahci_transmit_led_message(ap, state, size);
1360 }
1361
1362 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1363 {
1364 struct ata_link *link = dev->link;
1365 struct ata_port *ap = link->ap;
1366 struct ahci_port_priv *pp = ap->private_data;
1367 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1368 u32 port_led_state = emp->led_state;
1369
1370 /* save the desired Activity LED behavior */
1371 if (val == OFF) {
1372 /* clear LFLAG */
1373 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1374
1375 /* set the LED to OFF */
1376 port_led_state &= 0xfff80000;
1377 port_led_state |= (ap->port_no | (link->pmp << 8));
1378 ahci_transmit_led_message(ap, port_led_state, 4);
1379 } else {
1380 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1381 if (val == BLINK_OFF) {
1382 /* set LED to ON for idle */
1383 port_led_state &= 0xfff80000;
1384 port_led_state |= (ap->port_no | (link->pmp << 8));
1385 port_led_state |= 0x00010000; /* check this */
1386 ahci_transmit_led_message(ap, port_led_state, 4);
1387 }
1388 }
1389 emp->blink_policy = val;
1390 return 0;
1391 }
1392
1393 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1394 {
1395 struct ata_link *link = dev->link;
1396 struct ata_port *ap = link->ap;
1397 struct ahci_port_priv *pp = ap->private_data;
1398 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1399
1400 /* display the saved value of activity behavior for this
1401 * disk.
1402 */
1403 return sprintf(buf, "%d\n", emp->blink_policy);
1404 }
1405
1406 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1407 int port_no, void __iomem *mmio,
1408 void __iomem *port_mmio)
1409 {
1410 const char *emsg = NULL;
1411 int rc;
1412 u32 tmp;
1413
1414 /* make sure port is not active */
1415 rc = ahci_deinit_port(ap, &emsg);
1416 if (rc)
1417 dev_printk(KERN_WARNING, &pdev->dev,
1418 "%s (%d)\n", emsg, rc);
1419
1420 /* clear SError */
1421 tmp = readl(port_mmio + PORT_SCR_ERR);
1422 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1423 writel(tmp, port_mmio + PORT_SCR_ERR);
1424
1425 /* clear port IRQ */
1426 tmp = readl(port_mmio + PORT_IRQ_STAT);
1427 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1428 if (tmp)
1429 writel(tmp, port_mmio + PORT_IRQ_STAT);
1430
1431 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1432 }
1433
1434 static void ahci_init_controller(struct ata_host *host)
1435 {
1436 struct ahci_host_priv *hpriv = host->private_data;
1437 struct pci_dev *pdev = to_pci_dev(host->dev);
1438 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1439 int i;
1440 void __iomem *port_mmio;
1441 u32 tmp;
1442 int mv;
1443
1444 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1445 if (pdev->device == 0x6121)
1446 mv = 2;
1447 else
1448 mv = 4;
1449 port_mmio = __ahci_port_base(host, mv);
1450
1451 writel(0, port_mmio + PORT_IRQ_MASK);
1452
1453 /* clear port IRQ */
1454 tmp = readl(port_mmio + PORT_IRQ_STAT);
1455 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1456 if (tmp)
1457 writel(tmp, port_mmio + PORT_IRQ_STAT);
1458 }
1459
1460 for (i = 0; i < host->n_ports; i++) {
1461 struct ata_port *ap = host->ports[i];
1462
1463 port_mmio = ahci_port_base(ap);
1464 if (ata_port_is_dummy(ap))
1465 continue;
1466
1467 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1468 }
1469
1470 tmp = readl(mmio + HOST_CTL);
1471 VPRINTK("HOST_CTL 0x%x\n", tmp);
1472 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1473 tmp = readl(mmio + HOST_CTL);
1474 VPRINTK("HOST_CTL 0x%x\n", tmp);
1475 }
1476
1477 static void ahci_dev_config(struct ata_device *dev)
1478 {
1479 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1480
1481 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1482 dev->max_sectors = 255;
1483 ata_dev_printk(dev, KERN_INFO,
1484 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1485 }
1486 }
1487
1488 static unsigned int ahci_dev_classify(struct ata_port *ap)
1489 {
1490 void __iomem *port_mmio = ahci_port_base(ap);
1491 struct ata_taskfile tf;
1492 u32 tmp;
1493
1494 tmp = readl(port_mmio + PORT_SIG);
1495 tf.lbah = (tmp >> 24) & 0xff;
1496 tf.lbam = (tmp >> 16) & 0xff;
1497 tf.lbal = (tmp >> 8) & 0xff;
1498 tf.nsect = (tmp) & 0xff;
1499
1500 return ata_dev_classify(&tf);
1501 }
1502
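/* Fill one command list slot: opts carries the FIS length in dwords in its
 * low bits plus the AHCI_CMD_* flags and the PMP port number supplied by the
 * caller, and tbl_addr points at this tag's command table in the port's DMA
 * area.
 */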
1503 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1504 u32 opts)
1505 {
1506 dma_addr_t cmd_tbl_dma;
1507
1508 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1509
1510 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1511 pp->cmd_slot[tag].status = 0;
1512 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1513 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1514 }
1515
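/* Kick the port out of a stuck state: stop the DMA engine and, if the device
 * still shows BSY/DRQ and the HBA supports CLO, issue a Command List Override
 * to clear them before restarting the engine.
 */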
1516 static int ahci_kick_engine(struct ata_port *ap, int force_restart)
1517 {
1518 void __iomem *port_mmio = ahci_port_base(ap);
1519 struct ahci_host_priv *hpriv = ap->host->private_data;
1520 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1521 u32 tmp;
1522 int busy, rc;
1523
1524 /* do we need to kick the port? */
1525 busy = status & (ATA_BUSY | ATA_DRQ);
1526 if (!busy && !force_restart)
1527 return 0;
1528
1529 /* stop engine */
1530 rc = ahci_stop_engine(ap);
1531 if (rc)
1532 goto out_restart;
1533
1534 /* need to do CLO? */
1535 if (!busy) {
1536 rc = 0;
1537 goto out_restart;
1538 }
1539
1540 if (!(hpriv->cap & HOST_CAP_CLO)) {
1541 rc = -EOPNOTSUPP;
1542 goto out_restart;
1543 }
1544
1545 /* perform CLO */
1546 tmp = readl(port_mmio + PORT_CMD);
1547 tmp |= PORT_CMD_CLO;
1548 writel(tmp, port_mmio + PORT_CMD);
1549
1550 rc = 0;
1551 tmp = ata_wait_register(port_mmio + PORT_CMD,
1552 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1553 if (tmp & PORT_CMD_CLO)
1554 rc = -EIO;
1555
1556 /* restart engine */
1557 out_restart:
1558 ahci_start_engine(ap);
1559 return rc;
1560 }
1561
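/* Issue one polled command: build a 5-dword H2D Register FIS in command slot
 * 0, set bit 0 of PORT_CMD_ISSUE and, if a timeout is given, poll for the bit
 * to clear, kicking the engine on failure.
 */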
1562 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1563 struct ata_taskfile *tf, int is_cmd, u16 flags,
1564 unsigned long timeout_msec)
1565 {
1566 const u32 cmd_fis_len = 5; /* five dwords */
1567 struct ahci_port_priv *pp = ap->private_data;
1568 void __iomem *port_mmio = ahci_port_base(ap);
1569 u8 *fis = pp->cmd_tbl;
1570 u32 tmp;
1571
1572 /* prep the command */
1573 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1574 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1575
1576 /* issue & wait */
1577 writel(1, port_mmio + PORT_CMD_ISSUE);
1578
1579 if (timeout_msec) {
1580 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1581 1, timeout_msec);
1582 if (tmp & 0x1) {
1583 ahci_kick_engine(ap, 1);
1584 return -EBUSY;
1585 }
1586 } else
1587 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1588
1589 return 0;
1590 }
1591
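/* Common softreset (AHCI-1.1 10.4.1): transmit a Register FIS with SRST set,
 * wait briefly, transmit a second FIS clearing SRST, then wait for the link
 * to become ready and classify the attached device.
 */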
1592 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1593 int pmp, unsigned long deadline,
1594 int (*check_ready)(struct ata_link *link))
1595 {
1596 struct ata_port *ap = link->ap;
1597 const char *reason = NULL;
1598 unsigned long now, msecs;
1599 struct ata_taskfile tf;
1600 int rc;
1601
1602 DPRINTK("ENTER\n");
1603
1604 /* prepare for SRST (AHCI-1.1 10.4.1) */
1605 rc = ahci_kick_engine(ap, 1);
1606 if (rc && rc != -EOPNOTSUPP)
1607 ata_link_printk(link, KERN_WARNING,
1608 "failed to reset engine (errno=%d)\n", rc);
1609
1610 ata_tf_init(link->device, &tf);
1611
1612 /* issue the first D2H Register FIS */
1613 msecs = 0;
1614 now = jiffies;
1615 if (time_after(deadline, now))
1616 msecs = jiffies_to_msecs(deadline - now);
1617
1618 tf.ctl |= ATA_SRST;
1619 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1620 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1621 rc = -EIO;
1622 reason = "1st FIS failed";
1623 goto fail;
1624 }
1625
1626 /* spec says at least 5us, but be generous and sleep for 1ms */
1627 msleep(1);
1628
1629 /* issue the second D2H Register FIS */
1630 tf.ctl &= ~ATA_SRST;
1631 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1632
1633 /* wait for link to become ready */
1634 rc = ata_wait_after_reset(link, deadline, check_ready);
1635 /* link occupied, -ENODEV too is an error */
1636 if (rc) {
1637 reason = "device not ready";
1638 goto fail;
1639 }
1640 *class = ahci_dev_classify(ap);
1641
1642 DPRINTK("EXIT, class=%u\n", *class);
1643 return 0;
1644
1645 fail:
1646 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1647 return rc;
1648 }
1649
1650 static int ahci_check_ready(struct ata_link *link)
1651 {
1652 void __iomem *port_mmio = ahci_port_base(link->ap);
1653 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1654
1655 return ata_check_ready(status);
1656 }
1657
1658 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1659 unsigned long deadline)
1660 {
1661 int pmp = sata_srst_pmp(link);
1662
1663 DPRINTK("ENTER\n");
1664
1665 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1666 }
1667
1668 static int ahci_sb600_check_ready(struct ata_link *link)
1669 {
1670 void __iomem *port_mmio = ahci_port_base(link->ap);
1671 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1672 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1673
1674 /*
1675 * There is no need to check TFDATA if BAD PMP is found due to HW bug,
1676 * which can save timeout delay.
1677 */
1678 if (irq_status & PORT_IRQ_BAD_PMP)
1679 return -EIO;
1680
1681 return ata_check_ready(status);
1682 }
1683
1684 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1685 unsigned long deadline)
1686 {
1687 struct ata_port *ap = link->ap;
1688 void __iomem *port_mmio = ahci_port_base(ap);
1689 int pmp = sata_srst_pmp(link);
1690 int rc;
1691 u32 irq_sts;
1692
1693 DPRINTK("ENTER\n");
1694
1695 rc = ahci_do_softreset(link, class, pmp, deadline,
1696 ahci_sb600_check_ready);
1697
1698 /*
1699 * Soft reset fails on some ATI chips with IPMS set when PMP
1700 * is enabled but SATA HDD/ODD is connected to SATA port,
1701 * do soft reset again to port 0.
1702 */
1703 if (rc == -EIO) {
1704 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1705 if (irq_sts & PORT_IRQ_BAD_PMP) {
1706 ata_link_printk(link, KERN_WARNING,
1707 "failed due to HW bug, retry pmp=0\n");
1708 rc = ahci_do_softreset(link, class, 0, deadline,
1709 ahci_check_ready);
1710 }
1711 }
1712
1713 return rc;
1714 }
1715
1716 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1717 unsigned long deadline)
1718 {
1719 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1720 struct ata_port *ap = link->ap;
1721 struct ahci_port_priv *pp = ap->private_data;
1722 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1723 struct ata_taskfile tf;
1724 bool online;
1725 int rc;
1726
1727 DPRINTK("ENTER\n");
1728
1729 ahci_stop_engine(ap);
1730
1731 /* clear D2H reception area to properly wait for D2H FIS */
1732 ata_tf_init(link->device, &tf);
1733 tf.command = 0x80;
1734 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1735
1736 rc = sata_link_hardreset(link, timing, deadline, &online,
1737 ahci_check_ready);
1738
1739 ahci_start_engine(ap);
1740
1741 if (online)
1742 *class = ahci_dev_classify(ap);
1743
1744 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1745 return rc;
1746 }
1747
1748 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1749 unsigned long deadline)
1750 {
1751 struct ata_port *ap = link->ap;
1752 bool online;
1753 int rc;
1754
1755 DPRINTK("ENTER\n");
1756
1757 ahci_stop_engine(ap);
1758
1759 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1760 deadline, &online, NULL);
1761
1762 ahci_start_engine(ap);
1763
1764 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1765
1766 /* vt8251 doesn't clear BSY on signature FIS reception,
1767 * request follow-up softreset.
1768 */
1769 return online ? -EAGAIN : rc;
1770 }
1771
1772 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1773 unsigned long deadline)
1774 {
1775 struct ata_port *ap = link->ap;
1776 struct ahci_port_priv *pp = ap->private_data;
1777 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1778 struct ata_taskfile tf;
1779 bool online;
1780 int rc;
1781
1782 ahci_stop_engine(ap);
1783
1784 /* clear D2H reception area to properly wait for D2H FIS */
1785 ata_tf_init(link->device, &tf);
1786 tf.command = 0x80;
1787 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1788
1789 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1790 deadline, &online, NULL);
1791
1792 ahci_start_engine(ap);
1793
1794 /* The pseudo configuration device on SIMG4726 attached to
1795 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1796 * hardreset if no device is attached to the first downstream
1797 * port && the pseudo device locks up on SRST w/ PMP==0. To
1798 * work around this, wait for !BSY only briefly. If BSY isn't
1799 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1800 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1801 *
1802	 * Wait for two seconds.  Devices attached to the downstream port
1803	 * which can't process the following IDENTIFY after this will
1804	 * have to be reset again.  For most cases, this should
1805	 * suffice while keeping probing reasonably snappy.
1806 */
1807 if (online) {
1808 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1809 ahci_check_ready);
1810 if (rc)
1811 ahci_kick_engine(ap, 0);
1812 }
1813 return rc;
1814 }
1815
1816 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1817 {
1818 struct ata_port *ap = link->ap;
1819 void __iomem *port_mmio = ahci_port_base(ap);
1820 u32 new_tmp, tmp;
1821
1822 ata_std_postreset(link, class);
1823
1824 /* Make sure port's ATAPI bit is set appropriately */
1825 new_tmp = tmp = readl(port_mmio + PORT_CMD);
1826 if (*class == ATA_DEV_ATAPI)
1827 new_tmp |= PORT_CMD_ATAPI;
1828 else
1829 new_tmp &= ~PORT_CMD_ATAPI;
1830 if (new_tmp != tmp) {
1831 writel(new_tmp, port_mmio + PORT_CMD);
1832 readl(port_mmio + PORT_CMD); /* flush */
1833 }
1834 }
1835
1836 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1837 {
1838 struct scatterlist *sg;
1839 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1840 unsigned int si;
1841
1842 VPRINTK("ENTER\n");
1843
1844 /*
1845 * Next, the S/G list.
1846 */
1847 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1848 dma_addr_t addr = sg_dma_address(sg);
1849 u32 sg_len = sg_dma_len(sg);
1850
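		/* split into two 16-bit shifts so a 32-bit dma_addr_t
		 * is never shifted by 32 */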
1851 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1852 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1853 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1854 }
1855
1856 return si;
1857 }
1858
1859 static void ahci_qc_prep(struct ata_queued_cmd *qc)
1860 {
1861 struct ata_port *ap = qc->ap;
1862 struct ahci_port_priv *pp = ap->private_data;
1863 int is_atapi = ata_is_atapi(qc->tf.protocol);
1864 void *cmd_tbl;
1865 u32 opts;
1866 const u32 cmd_fis_len = 5; /* five dwords */
1867 unsigned int n_elem;
1868
1869 /*
1870 * Fill in command table information. First, the header,
1871 * a SATA Register - Host to Device command FIS.
1872 */
1873 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
1874
1875 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
1876 if (is_atapi) {
1877 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
1878 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
1879 }
1880
1881 n_elem = 0;
1882 if (qc->flags & ATA_QCFLAG_DMAMAP)
1883 n_elem = ahci_fill_sg(qc, cmd_tbl);
1884
1885 /*
1886 * Fill in command slot information.
1887 */
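	/* command header DW0: bits 4:0 = FIS length in dwords,
	 * bits 15:12 = PMP port number, bits 31:16 = PRDT entry count */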
1888 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
1889 if (qc->tf.flags & ATA_TFLAG_WRITE)
1890 opts |= AHCI_CMD_WRITE;
1891 if (is_atapi)
1892 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
1893
1894 ahci_fill_cmd_slot(pp, qc->tag, opts);
1895 }
1896
1897 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1898 {
1899 struct ahci_host_priv *hpriv = ap->host->private_data;
1900 struct ahci_port_priv *pp = ap->private_data;
1901 struct ata_eh_info *host_ehi = &ap->link.eh_info;
1902 struct ata_link *link = NULL;
1903 struct ata_queued_cmd *active_qc;
1904 struct ata_eh_info *active_ehi;
1905 u32 serror;
1906
1907 /* determine active link */
1908 ata_port_for_each_link(link, ap)
1909 if (ata_link_active(link))
1910 break;
1911 if (!link)
1912 link = &ap->link;
1913
1914 active_qc = ata_qc_from_tag(ap, link->active_tag);
1915 active_ehi = &link->eh_info;
1916
1917 /* record irq stat */
1918 ata_ehi_clear_desc(host_ehi);
1919 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
1920
1921 /* AHCI needs SError cleared; otherwise, it might lock up */
1922 ahci_scr_read(ap, SCR_ERROR, &serror);
1923 ahci_scr_write(ap, SCR_ERROR, serror);
1924 host_ehi->serror |= serror;
1925
1926 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
1927 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
1928 irq_stat &= ~PORT_IRQ_IF_ERR;
1929
1930 if (irq_stat & PORT_IRQ_TF_ERR) {
1931 /* If qc is active, charge it; otherwise, the active
1932 * link. There's no active qc on NCQ errors. It will
1933 * be determined by EH by reading log page 10h.
1934 */
1935 if (active_qc)
1936 active_qc->err_mask |= AC_ERR_DEV;
1937 else
1938 active_ehi->err_mask |= AC_ERR_DEV;
1939
1940 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
1941 host_ehi->serror &= ~SERR_INTERNAL;
1942 }
1943
1944 if (irq_stat & PORT_IRQ_UNK_FIS) {
1945 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1946
1947 active_ehi->err_mask |= AC_ERR_HSM;
1948 active_ehi->action |= ATA_EH_RESET;
1949 ata_ehi_push_desc(active_ehi,
1950 "unknown FIS %08x %08x %08x %08x" ,
1951 unk[0], unk[1], unk[2], unk[3]);
1952 }
1953
1954 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
1955 active_ehi->err_mask |= AC_ERR_HSM;
1956 active_ehi->action |= ATA_EH_RESET;
1957 ata_ehi_push_desc(active_ehi, "incorrect PMP");
1958 }
1959
1960 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
1961 host_ehi->err_mask |= AC_ERR_HOST_BUS;
1962 host_ehi->action |= ATA_EH_RESET;
1963 ata_ehi_push_desc(host_ehi, "host bus error");
1964 }
1965
1966 if (irq_stat & PORT_IRQ_IF_ERR) {
1967 host_ehi->err_mask |= AC_ERR_ATA_BUS;
1968 host_ehi->action |= ATA_EH_RESET;
1969 ata_ehi_push_desc(host_ehi, "interface fatal error");
1970 }
1971
1972 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
1973 ata_ehi_hotplugged(host_ehi);
1974 ata_ehi_push_desc(host_ehi, "%s",
1975 irq_stat & PORT_IRQ_CONNECT ?
1976 "connection status changed" : "PHY RDY changed");
1977 }
1978
1979 /* okay, let's hand over to EH */
1980
1981 if (irq_stat & PORT_IRQ_FREEZE)
1982 ata_port_freeze(ap);
1983 else
1984 ata_port_abort(ap);
1985 }
1986
1987 static void ahci_port_intr(struct ata_port *ap)
1988 {
1989 void __iomem *port_mmio = ahci_port_base(ap);
1990 struct ata_eh_info *ehi = &ap->link.eh_info;
1991 struct ahci_port_priv *pp = ap->private_data;
1992 struct ahci_host_priv *hpriv = ap->host->private_data;
1993 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
1994 u32 status, qc_active;
1995 int rc;
1996
1997 status = readl(port_mmio + PORT_IRQ_STAT);
1998 writel(status, port_mmio + PORT_IRQ_STAT);
1999
2000 /* ignore BAD_PMP while resetting */
2001 if (unlikely(resetting))
2002 status &= ~PORT_IRQ_BAD_PMP;
2003
2004	/* If we are getting PhyRdy, this is
2005	 * just a power state change; ignore the
2006	 * interrupt and clear the PhyRdy/Comm
2007	 * Wake bits from SError.
2008	 */
2009 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2010 (status & PORT_IRQ_PHYRDY)) {
2011 status &= ~PORT_IRQ_PHYRDY;
2012 ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
2013 }
2014
2015 if (unlikely(status & PORT_IRQ_ERROR)) {
2016 ahci_error_intr(ap, status);
2017 return;
2018 }
2019
2020 if (status & PORT_IRQ_SDB_FIS) {
2021 /* If SNotification is available, leave notification
2022 * handling to sata_async_notification(). If not,
2023 * emulate it by snooping SDB FIS RX area.
2024 *
2025		 * Snooping the FIS RX area is probably cheaper than
2026		 * poking SNotification, but some controllers which
2027		 * implement SNotification, ICH9 for example, don't
2028		 * store the AN SDB FIS in the receive area.
2029 */
2030 if (hpriv->cap & HOST_CAP_SNTF)
2031 sata_async_notification(ap);
2032 else {
2033 /* If the 'N' bit in word 0 of the FIS is set,
2034 * we just received asynchronous notification.
2035 * Tell libata about it.
2036 */
2037 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2038 u32 f0 = le32_to_cpu(f[0]);
2039
2040 if (f0 & (1 << 15))
2041 sata_async_notification(ap);
2042 }
2043 }
2044
2045 /* pp->active_link is valid iff any command is in flight */
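	/* NCQ completions clear bits in PxSACT; non-NCQ completions clear PxCI */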
2046 if (ap->qc_active && pp->active_link->sactive)
2047 qc_active = readl(port_mmio + PORT_SCR_ACT);
2048 else
2049 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2050
2051 rc = ata_qc_complete_multiple(ap, qc_active);
2052
2053 /* while resetting, invalid completions are expected */
2054 if (unlikely(rc < 0 && !resetting)) {
2055 ehi->err_mask |= AC_ERR_HSM;
2056 ehi->action |= ATA_EH_RESET;
2057 ata_port_freeze(ap);
2058 }
2059 }
2060
2061 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2062 {
2063 struct ata_host *host = dev_instance;
2064 struct ahci_host_priv *hpriv;
2065 unsigned int i, handled = 0;
2066 void __iomem *mmio;
2067 u32 irq_stat, irq_masked;
2068
2069 VPRINTK("ENTER\n");
2070
2071 hpriv = host->private_data;
2072 mmio = host->iomap[AHCI_PCI_BAR];
2073
2074 /* sigh. 0xffffffff is a valid return from h/w */
2075 irq_stat = readl(mmio + HOST_IRQ_STAT);
2076 if (!irq_stat)
2077 return IRQ_NONE;
2078
2079 irq_masked = irq_stat & hpriv->port_map;
2080
2081 spin_lock(&host->lock);
2082
2083 for (i = 0; i < host->n_ports; i++) {
2084 struct ata_port *ap;
2085
2086 if (!(irq_masked & (1 << i)))
2087 continue;
2088
2089 ap = host->ports[i];
2090 if (ap) {
2091 ahci_port_intr(ap);
2092 VPRINTK("port %u\n", i);
2093 } else {
2094 VPRINTK("port %u (no irq)\n", i);
2095 if (ata_ratelimit())
2096 dev_printk(KERN_WARNING, host->dev,
2097 "interrupt on disabled port %u\n", i);
2098 }
2099
2100 handled = 1;
2101 }
2102
2103	/* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
2104 * it should be cleared after all the port events are cleared;
2105 * otherwise, it will raise a spurious interrupt after each
2106 * valid one. Please read section 10.6.2 of ahci 1.1 for more
2107 * information.
2108 *
2109 * Also, use the unmasked value to clear interrupt as spurious
2110 * pending event on a dummy port might cause screaming IRQ.
2111 */
2112 writel(irq_stat, mmio + HOST_IRQ_STAT);
2113
2114 spin_unlock(&host->lock);
2115
2116 VPRINTK("EXIT\n");
2117
2118 return IRQ_RETVAL(handled);
2119 }
2120
2121 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2122 {
2123 struct ata_port *ap = qc->ap;
2124 void __iomem *port_mmio = ahci_port_base(ap);
2125 struct ahci_port_priv *pp = ap->private_data;
2126
2127 /* Keep track of the currently active link. It will be used
2128 * in completion path to determine whether NCQ phase is in
2129 * progress.
2130 */
2131 pp->active_link = qc->dev->link;
2132
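	/* per AHCI, an NCQ command's PxSACT bit must be set before its PxCI bit */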
2133 if (qc->tf.protocol == ATA_PROT_NCQ)
2134 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2135 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2136 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
2137
2138 ahci_sw_activity(qc->dev->link);
2139
2140 return 0;
2141 }
2142
2143 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2144 {
2145 struct ahci_port_priv *pp = qc->ap->private_data;
2146 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2147
2148 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2149 return true;
2150 }
2151
2152 static void ahci_freeze(struct ata_port *ap)
2153 {
2154 void __iomem *port_mmio = ahci_port_base(ap);
2155
2156 /* turn IRQ off */
2157 writel(0, port_mmio + PORT_IRQ_MASK);
2158 }
2159
2160 static void ahci_thaw(struct ata_port *ap)
2161 {
2162 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2163 void __iomem *port_mmio = ahci_port_base(ap);
2164 u32 tmp;
2165 struct ahci_port_priv *pp = ap->private_data;
2166
2167 /* clear IRQ */
2168 tmp = readl(port_mmio + PORT_IRQ_STAT);
2169 writel(tmp, port_mmio + PORT_IRQ_STAT);
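	/* each bit of the global HOST_IRQ_STAT register maps to one port */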
2170 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2171
2172 /* turn IRQ back on */
2173 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2174 }
2175
2176 static void ahci_error_handler(struct ata_port *ap)
2177 {
2178 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2179 /* restart engine */
2180 ahci_stop_engine(ap);
2181 ahci_start_engine(ap);
2182 }
2183
2184 sata_pmp_error_handler(ap);
2185 }
2186
2187 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2188 {
2189 struct ata_port *ap = qc->ap;
2190
2191 /* make DMA engine forget about the failed command */
2192 if (qc->flags & ATA_QCFLAG_FAILED)
2193 ahci_kick_engine(ap, 1);
2194 }
2195
2196 static void ahci_pmp_attach(struct ata_port *ap)
2197 {
2198 void __iomem *port_mmio = ahci_port_base(ap);
2199 struct ahci_port_priv *pp = ap->private_data;
2200 u32 cmd;
2201
2202 cmd = readl(port_mmio + PORT_CMD);
2203 cmd |= PORT_CMD_PMP;
2204 writel(cmd, port_mmio + PORT_CMD);
2205
2206 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2207 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2208 }
2209
2210 static void ahci_pmp_detach(struct ata_port *ap)
2211 {
2212 void __iomem *port_mmio = ahci_port_base(ap);
2213 struct ahci_port_priv *pp = ap->private_data;
2214 u32 cmd;
2215
2216 cmd = readl(port_mmio + PORT_CMD);
2217 cmd &= ~PORT_CMD_PMP;
2218 writel(cmd, port_mmio + PORT_CMD);
2219
2220 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2221 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2222 }
2223
2224 static int ahci_port_resume(struct ata_port *ap)
2225 {
2226 ahci_power_up(ap);
2227 ahci_start_port(ap);
2228
2229 if (sata_pmp_attached(ap))
2230 ahci_pmp_attach(ap);
2231 else
2232 ahci_pmp_detach(ap);
2233
2234 return 0;
2235 }
2236
2237 #ifdef CONFIG_PM
2238 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2239 {
2240 const char *emsg = NULL;
2241 int rc;
2242
2243 rc = ahci_deinit_port(ap, &emsg);
2244 if (rc == 0)
2245 ahci_power_down(ap);
2246 else {
2247 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2248 ahci_start_port(ap);
2249 }
2250
2251 return rc;
2252 }
2253
2254 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2255 {
2256 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2257 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2258 u32 ctl;
2259
2260 if (mesg.event & PM_EVENT_SLEEP) {
2261 /* AHCI spec rev1.1 section 8.3.3:
2262 * Software must disable interrupts prior to requesting a
2263 * transition of the HBA to D3 state.
2264 */
2265 ctl = readl(mmio + HOST_CTL);
2266 ctl &= ~HOST_IRQ_EN;
2267 writel(ctl, mmio + HOST_CTL);
2268 readl(mmio + HOST_CTL); /* flush */
2269 }
2270
2271 return ata_pci_device_suspend(pdev, mesg);
2272 }
2273
2274 static int ahci_pci_device_resume(struct pci_dev *pdev)
2275 {
2276 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2277 int rc;
2278
2279 rc = ata_pci_device_do_resume(pdev);
2280 if (rc)
2281 return rc;
2282
2283 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2284 rc = ahci_reset_controller(host);
2285 if (rc)
2286 return rc;
2287
2288 ahci_init_controller(host);
2289 }
2290
2291 ata_host_resume(host);
2292
2293 return 0;
2294 }
2295 #endif
2296
2297 static int ahci_port_start(struct ata_port *ap)
2298 {
2299 struct device *dev = ap->host->dev;
2300 struct ahci_port_priv *pp;
2301 void *mem;
2302 dma_addr_t mem_dma;
2303
2304 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2305 if (!pp)
2306 return -ENOMEM;
2307
2308 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2309 GFP_KERNEL);
2310 if (!mem)
2311 return -ENOMEM;
2312 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2313
2314 /*
2315 * First item in chunk of DMA memory: 32-slot command table,
2316 * 32 bytes each in size
2317 */
2318 pp->cmd_slot = mem;
2319 pp->cmd_slot_dma = mem_dma;
2320
2321 mem += AHCI_CMD_SLOT_SZ;
2322 mem_dma += AHCI_CMD_SLOT_SZ;
2323
2324 /*
2325 * Second item: Received-FIS area
2326 */
2327 pp->rx_fis = mem;
2328 pp->rx_fis_dma = mem_dma;
2329
2330 mem += AHCI_RX_FIS_SZ;
2331 mem_dma += AHCI_RX_FIS_SZ;
2332
2333 /*
2334 * Third item: data area for storing a single command
2335 * and its scatter-gather table
2336 */
2337 pp->cmd_tbl = mem;
2338 pp->cmd_tbl_dma = mem_dma;
2339
2340 /*
2341 * Save off initial list of interrupts to be enabled.
2342	 * This could be changed later.
2343 */
2344 pp->intr_mask = DEF_PORT_IRQ;
2345
2346 ap->private_data = pp;
2347
2348 /* engage engines, captain */
2349 return ahci_port_resume(ap);
2350 }
2351
2352 static void ahci_port_stop(struct ata_port *ap)
2353 {
2354 const char *emsg = NULL;
2355 int rc;
2356
2357 /* de-initialize port */
2358 rc = ahci_deinit_port(ap, &emsg);
2359 if (rc)
2360 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2361 }
2362
2363 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2364 {
2365 int rc;
2366
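	/* prefer 64-bit DMA when the HBA advertises CAP.S64A (using_dac is set);
	 * otherwise fall back to 32-bit streaming and consistent masks */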
2367 if (using_dac &&
2368 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2369 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2370 if (rc) {
2371 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2372 if (rc) {
2373 dev_printk(KERN_ERR, &pdev->dev,
2374 "64-bit DMA enable failed\n");
2375 return rc;
2376 }
2377 }
2378 } else {
2379 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2380 if (rc) {
2381 dev_printk(KERN_ERR, &pdev->dev,
2382 "32-bit DMA enable failed\n");
2383 return rc;
2384 }
2385 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2386 if (rc) {
2387 dev_printk(KERN_ERR, &pdev->dev,
2388 "32-bit consistent DMA enable failed\n");
2389 return rc;
2390 }
2391 }
2392 return 0;
2393 }
2394
2395 static void ahci_print_info(struct ata_host *host)
2396 {
2397 struct ahci_host_priv *hpriv = host->private_data;
2398 struct pci_dev *pdev = to_pci_dev(host->dev);
2399 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2400 u32 vers, cap, impl, speed;
2401 const char *speed_s;
2402 u16 cc;
2403 const char *scc_s;
2404
2405 vers = readl(mmio + HOST_VERSION);
2406 cap = hpriv->cap;
2407 impl = hpriv->port_map;
2408
2409 speed = (cap >> 20) & 0xf;
2410 if (speed == 1)
2411 speed_s = "1.5";
2412 else if (speed == 2)
2413 speed_s = "3";
2414 else
2415 speed_s = "?";
2416
2417 pci_read_config_word(pdev, 0x0a, &cc);
2418 if (cc == PCI_CLASS_STORAGE_IDE)
2419 scc_s = "IDE";
2420 else if (cc == PCI_CLASS_STORAGE_SATA)
2421 scc_s = "SATA";
2422 else if (cc == PCI_CLASS_STORAGE_RAID)
2423 scc_s = "RAID";
2424 else
2425 scc_s = "unknown";
2426
2427 dev_printk(KERN_INFO, &pdev->dev,
2428 "AHCI %02x%02x.%02x%02x "
2429 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2430 ,
2431
2432 (vers >> 24) & 0xff,
2433 (vers >> 16) & 0xff,
2434 (vers >> 8) & 0xff,
2435 vers & 0xff,
2436
2437 ((cap >> 8) & 0x1f) + 1,
2438 (cap & 0x1f) + 1,
2439 speed_s,
2440 impl,
2441 scc_s);
2442
2443 dev_printk(KERN_INFO, &pdev->dev,
2444 "flags: "
2445 "%s%s%s%s%s%s%s"
2446 "%s%s%s%s%s%s%s"
2447 "%s\n"
2448 ,
2449
2450 cap & (1 << 31) ? "64bit " : "",
2451 cap & (1 << 30) ? "ncq " : "",
2452 cap & (1 << 29) ? "sntf " : "",
2453 cap & (1 << 28) ? "ilck " : "",
2454 cap & (1 << 27) ? "stag " : "",
2455 cap & (1 << 26) ? "pm " : "",
2456 cap & (1 << 25) ? "led " : "",
2457
2458 cap & (1 << 24) ? "clo " : "",
2459 cap & (1 << 19) ? "nz " : "",
2460 cap & (1 << 18) ? "only " : "",
2461 cap & (1 << 17) ? "pmp " : "",
2462 cap & (1 << 15) ? "pio " : "",
2463 cap & (1 << 14) ? "slum " : "",
2464 cap & (1 << 13) ? "part " : "",
2465 cap & (1 << 6) ? "ems ": ""
2466 );
2467 }
2468
2469 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2470 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2471 * support PMP and the 4726 either directly exports the device
2472 * attached to the first downstream port or acts as a hardware storage
2473 * controller and emulates a single ATA device (can be RAID 0/1 or some
2474 * other configuration).
2475 *
2476 * When there's no device attached to the first downstream port of the
2477 * 4726, "Config Disk" appears, which is a pseudo ATA device to
2478 * configure the 4726. However, ATA emulation of the device is very
2479 * lame. It doesn't send signature D2H Reg FIS after the initial
2480 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2481 *
2482 * The following function works around the problem by always using
2483 * hardreset on the port and not depending on receiving signature FIS
2484 * afterward. If signature FIS isn't received soon, ATA class is
2485 * assumed without follow-up softreset.
2486 */
2487 static void ahci_p5wdh_workaround(struct ata_host *host)
2488 {
2489 static struct dmi_system_id sysids[] = {
2490 {
2491 .ident = "P5W DH Deluxe",
2492 .matches = {
2493 DMI_MATCH(DMI_SYS_VENDOR,
2494 "ASUSTEK COMPUTER INC"),
2495 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2496 },
2497 },
2498 { }
2499 };
2500 struct pci_dev *pdev = to_pci_dev(host->dev);
2501
2502 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2503 dmi_check_system(sysids)) {
2504 struct ata_port *ap = host->ports[1];
2505
2506 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2507 "Deluxe on-board SIMG4726 workaround\n");
2508
2509 ap->ops = &ahci_p5wdh_ops;
2510 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2511 }
2512 }
2513
2514 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2515 {
2516 static int printed_version;
2517 unsigned int board_id = ent->driver_data;
2518 struct ata_port_info pi = ahci_port_info[board_id];
2519 const struct ata_port_info *ppi[] = { &pi, NULL };
2520 struct device *dev = &pdev->dev;
2521 struct ahci_host_priv *hpriv;
2522 struct ata_host *host;
2523 int n_ports, i, rc;
2524
2525 VPRINTK("ENTER\n");
2526
2527 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2528
2529 if (!printed_version++)
2530 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2531
2532 /* acquire resources */
2533 rc = pcim_enable_device(pdev);
2534 if (rc)
2535 return rc;
2536
2537	/* AHCI controllers often implement an SFF compatible interface.
2538 * Grab all PCI BARs just in case.
2539 */
2540 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
2541 if (rc == -EBUSY)
2542 pcim_pin_device(pdev);
2543 if (rc)
2544 return rc;
2545
2546 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2547 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
2548 u8 map;
2549
2550 /* ICH6s share the same PCI ID for both piix and ahci
2551 * modes. Enabling ahci mode while MAP indicates
2552 * combined mode is a bad idea. Yield to ata_piix.
2553 */
2554 pci_read_config_byte(pdev, ICH_MAP, &map);
2555 if (map & 0x3) {
2556 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
2557 "combined mode, can't enable AHCI mode\n");
2558 return -ENODEV;
2559 }
2560 }
2561
2562 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2563 if (!hpriv)
2564 return -ENOMEM;
2565 hpriv->flags |= (unsigned long)pi.private_data;
2566
2567 /* MCP65 revision A1 and A2 can't do MSI */
2568 if (board_id == board_ahci_mcp65 &&
2569 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
2570 hpriv->flags |= AHCI_HFLAG_NO_MSI;
2571
2572 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
2573 pci_intx(pdev, 1);
2574
2575 /* save initial config */
2576 ahci_save_initial_config(pdev, hpriv);
2577
2578 /* prepare host */
2579 if (hpriv->cap & HOST_CAP_NCQ)
2580 pi.flags |= ATA_FLAG_NCQ;
2581
2582 if (hpriv->cap & HOST_CAP_PMP)
2583 pi.flags |= ATA_FLAG_PMP;
2584
2585 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
2586 u8 messages;
2587 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
2588 u32 em_loc = readl(mmio + HOST_EM_LOC);
2589 u32 em_ctl = readl(mmio + HOST_EM_CTL);
2590
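		/* EM_CTL bits 19:16 advertise the supported message types (bit 0: LED);
		 * the upper half of EM_LOC gives the message buffer offset in dwords */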
2591 messages = (em_ctl & 0x000f0000) >> 16;
2592
2593 /* we only support LED message type right now */
2594 if ((messages & 0x01) && (ahci_em_messages == 1)) {
2595 /* store em_loc */
2596 hpriv->em_loc = ((em_loc >> 16) * 4);
2597 pi.flags |= ATA_FLAG_EM;
2598 if (!(em_ctl & EM_CTL_ALHD))
2599 pi.flags |= ATA_FLAG_SW_ACTIVITY;
2600 }
2601 }
2602
2603	/* CAP.NP sometimes indicates the index of the last enabled
2604 * port, at other times, that of the last possible port, so
2605 * determining the maximum port number requires looking at
2606 * both CAP.NP and port_map.
2607 */
2608 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
2609
2610 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2611 if (!host)
2612 return -ENOMEM;
2613 host->iomap = pcim_iomap_table(pdev);
2614 host->private_data = hpriv;
2615
2616 if (pi.flags & ATA_FLAG_EM)
2617 ahci_reset_em(host);
2618
2619 for (i = 0; i < host->n_ports; i++) {
2620 struct ata_port *ap = host->ports[i];
2621
2622 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
2623 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
2624 0x100 + ap->port_no * 0x80, "port");
2625
2626 /* set initial link pm policy */
2627 ap->pm_policy = NOT_AVAILABLE;
2628
2629 /* set enclosure management message type */
2630 if (ap->flags & ATA_FLAG_EM)
2631 ap->em_message_type = ahci_em_messages;
2632
2633
2634 /* disabled/not-implemented port */
2635 if (!(hpriv->port_map & (1 << i)))
2636 ap->ops = &ata_dummy_port_ops;
2637 }
2638
2639 /* apply workaround for ASUS P5W DH Deluxe mainboard */
2640 ahci_p5wdh_workaround(host);
2641
2642 /* initialize adapter */
2643 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
2644 if (rc)
2645 return rc;
2646
2647 rc = ahci_reset_controller(host);
2648 if (rc)
2649 return rc;
2650
2651 ahci_init_controller(host);
2652 ahci_print_info(host);
2653
2654 pci_set_master(pdev);
2655 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
2656 &ahci_sht);
2657 }
2658
2659 static int __init ahci_init(void)
2660 {
2661 return pci_register_driver(&ahci_pci_driver);
2662 }
2663
2664 static void __exit ahci_exit(void)
2665 {
2666 pci_unregister_driver(&ahci_pci_driver);
2667 }
2668
2669
2670 MODULE_AUTHOR("Jeff Garzik");
2671 MODULE_DESCRIPTION("AHCI SATA low-level driver");
2672 MODULE_LICENSE("GPL");
2673 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
2674 MODULE_VERSION(DRV_VERSION);
2675
2676 module_init(ahci_init);
2677 module_exit(ahci_exit);