drivers/ata/ahci.c
1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48
49 #define DRV_NAME "ahci"
50 #define DRV_VERSION "3.0"
51
52 /* Enclosure Management Control */
53 #define EM_CTRL_MSG_TYPE 0x000f0000
54
55 /* Enclosure Management LED Message Type */
56 #define EM_MSG_LED_HBA_PORT 0x0000000f
57 #define EM_MSG_LED_PMP_SLOT 0x0000ff00
58 #define EM_MSG_LED_VALUE 0xffff0000
59 #define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
60 #define EM_MSG_LED_VALUE_OFF 0xfff80000
61 #define EM_MSG_LED_VALUE_ON 0x00010000
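/*
 * As carved up by the masks above, a LED message packs the HBA port
 * number into bits 0:3, the PMP slot into bits 8:15 and the LED value
 * into bits 16:31.  Going by the spec linked in the header comment,
 * the value field carries 3-bit states for the activity/locate/fault
 * LEDs, with bit 16 doubling as the plain on/off bit used below.
 */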
62
63 static int ahci_skip_host_reset;
64 static int ahci_ignore_sss;
65
66 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
67 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
68
69 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
70 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
71
72 static int ahci_enable_alpm(struct ata_port *ap,
73 enum link_pm policy);
74 static void ahci_disable_alpm(struct ata_port *ap);
75 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
76 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
77 size_t size);
78 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
79 ssize_t size);
80
81 enum {
82 AHCI_PCI_BAR = 5,
83 AHCI_MAX_PORTS = 32,
84 AHCI_MAX_SG = 168, /* hardware max is 64K */
85 AHCI_DMA_BOUNDARY = 0xffffffff,
86 AHCI_MAX_CMDS = 32,
87 AHCI_CMD_SZ = 32,
88 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
89 AHCI_RX_FIS_SZ = 256,
90 AHCI_CMD_TBL_CDB = 0x40,
91 AHCI_CMD_TBL_HDR_SZ = 0x80,
92 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
93 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
94 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
95 AHCI_RX_FIS_SZ,
96 AHCI_IRQ_ON_SG = (1 << 31),
97 AHCI_CMD_ATAPI = (1 << 5),
98 AHCI_CMD_WRITE = (1 << 6),
99 AHCI_CMD_PREFETCH = (1 << 7),
100 AHCI_CMD_RESET = (1 << 8),
101 AHCI_CMD_CLR_BUSY = (1 << 10),
102
103 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
104 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
105 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
106
107 board_ahci = 0,
108 board_ahci_vt8251 = 1,
109 board_ahci_ign_iferr = 2,
110 board_ahci_sb600 = 3,
111 board_ahci_mv = 4,
112 board_ahci_sb700 = 5, /* for SB700 and SB800 */
113 board_ahci_mcp65 = 6,
114 board_ahci_nopmp = 7,
115 board_ahci_yesncq = 8,
116
117 /* global controller registers */
118 HOST_CAP = 0x00, /* host capabilities */
119 HOST_CTL = 0x04, /* global host control */
120 HOST_IRQ_STAT = 0x08, /* interrupt status */
121 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
122 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
123 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
124 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
125 HOST_CAP2 = 0x24, /* host capabilities, extended */
126
127 /* HOST_CTL bits */
128 HOST_RESET = (1 << 0), /* reset controller; self-clear */
129 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
130 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
131
132 /* HOST_CAP bits */
133 HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
134 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
135 HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
136 HOST_CAP_PART = (1 << 13), /* Partial state capable */
137 HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
138 HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
139 HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
140 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
141 HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
142 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
143 HOST_CAP_LED = (1 << 25), /* Supports activity LED */
144 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
145 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
146 HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
147 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
148 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
149 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
150
151 /* HOST_CAP2 bits */
152 HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
153 HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
154 HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
155
156 /* registers for each SATA port */
157 PORT_LST_ADDR = 0x00, /* command list DMA addr */
158 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
159 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
160 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
161 PORT_IRQ_STAT = 0x10, /* interrupt status */
162 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
163 PORT_CMD = 0x18, /* port command */
164 PORT_TFDATA = 0x20, /* taskfile data */
165 PORT_SIG = 0x24, /* device TF signature */
166 PORT_CMD_ISSUE = 0x38, /* command issue */
167 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
168 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
169 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
170 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
171 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
172
173 /* PORT_IRQ_{STAT,MASK} bits */
174 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
175 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
176 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
177 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
178 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
179 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
180 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
181 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
182
183 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
184 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
185 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
186 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
187 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
188 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
189 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
190 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
191 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
192
193 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
194 PORT_IRQ_IF_ERR |
195 PORT_IRQ_CONNECT |
196 PORT_IRQ_PHYRDY |
197 PORT_IRQ_UNK_FIS |
198 PORT_IRQ_BAD_PMP,
199 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
200 PORT_IRQ_TF_ERR |
201 PORT_IRQ_HBUS_DATA_ERR,
202 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
203 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
204 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
205
206 /* PORT_CMD bits */
207 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
208 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
209 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
210 PORT_CMD_PMP = (1 << 17), /* PMP attached */
211 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
212 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
213 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
214 PORT_CMD_CLO = (1 << 3), /* Command list override */
215 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
216 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
217 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
218
219 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
220 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
221 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
222 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
223
224 /* hpriv->flags bits */
225 AHCI_HFLAG_NO_NCQ = (1 << 0),
226 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
227 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
228 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
229 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
230 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
231 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
232 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
233 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
234 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
235 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
236 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
237 link offline */
238
239 /* ap->flags bits */
240
241 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
242 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
243 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
244 ATA_FLAG_IPM,
245
246 ICH_MAP = 0x90, /* ICH MAP register */
247
248 /* em constants */
249 EM_MAX_SLOTS = 8,
250 EM_MAX_RETRY = 5,
251
252 /* em_ctl bits */
253 EM_CTL_RST = (1 << 9), /* Reset */
254 EM_CTL_TM = (1 << 8), /* Transmit Message */
255 EM_CTL_ALHD = (1 << 26), /* Activity LED */
256 };
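/*
 * Per-port DMA footprint implied by the sizes above: 32 command slots
 * of 32 bytes (1024), a 256 byte RX FIS area and 32 command tables of
 * 0x80 + 168 * 16 = 2816 bytes each, so AHCI_PORT_PRIV_DMA_SZ works
 * out to 1024 + 256 + 90112 = 91392 bytes per port.
 */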
257
258 struct ahci_cmd_hdr {
259 __le32 opts;
260 __le32 status;
261 __le32 tbl_addr;
262 __le32 tbl_addr_hi;
263 __le32 reserved[4];
264 };
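/*
 * opts mirrors DW0 of the AHCI command header: bits 0:4 hold the
 * command FIS length in dwords, bits 5:10 the AHCI_CMD_* flags above,
 * bits 12:15 the target PMP port (see the "pmp << 12" in
 * ahci_exec_polled_cmd()) and, per the spec, the upper 16 bits the
 * number of PRDT entries.
 */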
265
266 struct ahci_sg {
267 __le32 addr;
268 __le32 addr_hi;
269 __le32 reserved;
270 __le32 flags_size;
271 };
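/*
 * One PRDT entry: addr/addr_hi point at the data buffer and, per the
 * spec, the low 22 bits of flags_size hold the byte count minus one
 * (up to 4 MB per entry) while bit 31 is AHCI_IRQ_ON_SG.
 */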
272
273 struct ahci_em_priv {
274 enum sw_activity blink_policy;
275 struct timer_list timer;
276 unsigned long saved_activity;
277 unsigned long activity;
278 unsigned long led_state;
279 };
280
281 struct ahci_host_priv {
282 unsigned int flags; /* AHCI_HFLAG_* */
283 u32 cap; /* cap to use */
284 u32 cap2; /* cap2 to use */
285 u32 port_map; /* port map to use */
286 u32 saved_cap; /* saved initial cap */
287 u32 saved_cap2; /* saved initial cap2 */
288 u32 saved_port_map; /* saved initial port_map */
289 u32 em_loc; /* enclosure management location */
290 };
291
292 struct ahci_port_priv {
293 struct ata_link *active_link;
294 struct ahci_cmd_hdr *cmd_slot;
295 dma_addr_t cmd_slot_dma;
296 void *cmd_tbl;
297 dma_addr_t cmd_tbl_dma;
298 void *rx_fis;
299 dma_addr_t rx_fis_dma;
300 /* for NCQ spurious interrupt analysis */
301 unsigned int ncq_saw_d2h:1;
302 unsigned int ncq_saw_dmas:1;
303 unsigned int ncq_saw_sdb:1;
304 u32 intr_mask; /* interrupts to enable */
305 /* enclosure management info per PM slot */
306 struct ahci_em_priv em_priv[EM_MAX_SLOTS];
307 };
308
309 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
310 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
311 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
312 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
313 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
314 static int ahci_port_start(struct ata_port *ap);
315 static void ahci_port_stop(struct ata_port *ap);
316 static void ahci_qc_prep(struct ata_queued_cmd *qc);
317 static void ahci_freeze(struct ata_port *ap);
318 static void ahci_thaw(struct ata_port *ap);
319 static void ahci_pmp_attach(struct ata_port *ap);
320 static void ahci_pmp_detach(struct ata_port *ap);
321 static int ahci_softreset(struct ata_link *link, unsigned int *class,
322 unsigned long deadline);
323 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
324 unsigned long deadline);
325 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
326 unsigned long deadline);
327 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
328 unsigned long deadline);
329 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
330 unsigned long deadline);
331 static void ahci_postreset(struct ata_link *link, unsigned int *class);
332 static void ahci_error_handler(struct ata_port *ap);
333 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
334 static int ahci_port_resume(struct ata_port *ap);
335 static void ahci_dev_config(struct ata_device *dev);
336 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
337 u32 opts);
338 #ifdef CONFIG_PM
339 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
340 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
341 static int ahci_pci_device_resume(struct pci_dev *pdev);
342 #endif
343 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
344 static ssize_t ahci_activity_store(struct ata_device *dev,
345 enum sw_activity val);
346 static void ahci_init_sw_activity(struct ata_link *link);
347
348 static ssize_t ahci_show_host_caps(struct device *dev,
349 struct device_attribute *attr, char *buf);
350 static ssize_t ahci_show_host_cap2(struct device *dev,
351 struct device_attribute *attr, char *buf);
352 static ssize_t ahci_show_host_version(struct device *dev,
353 struct device_attribute *attr, char *buf);
354 static ssize_t ahci_show_port_cmd(struct device *dev,
355 struct device_attribute *attr, char *buf);
356
357 DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
358 DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
359 DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
360 DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
361
362 static struct device_attribute *ahci_shost_attrs[] = {
363 &dev_attr_link_power_management_policy,
364 &dev_attr_em_message_type,
365 &dev_attr_em_message,
366 &dev_attr_ahci_host_caps,
367 &dev_attr_ahci_host_cap2,
368 &dev_attr_ahci_host_version,
369 &dev_attr_ahci_port_cmd,
370 NULL
371 };
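/*
 * Being shost attributes, these typically surface under
 * /sys/class/scsi_host/hostN/; e.g. reading ahci_host_caps there dumps
 * the cached CAP register in hex.
 */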
372
373 static struct device_attribute *ahci_sdev_attrs[] = {
374 &dev_attr_sw_activity,
375 &dev_attr_unload_heads,
376 NULL
377 };
378
379 static struct scsi_host_template ahci_sht = {
380 ATA_NCQ_SHT(DRV_NAME),
381 .can_queue = AHCI_MAX_CMDS - 1,
382 .sg_tablesize = AHCI_MAX_SG,
383 .dma_boundary = AHCI_DMA_BOUNDARY,
384 .shost_attrs = ahci_shost_attrs,
385 .sdev_attrs = ahci_sdev_attrs,
386 };
387
388 static struct ata_port_operations ahci_ops = {
389 .inherits = &sata_pmp_port_ops,
390
391 .qc_defer = sata_pmp_qc_defer_cmd_switch,
392 .qc_prep = ahci_qc_prep,
393 .qc_issue = ahci_qc_issue,
394 .qc_fill_rtf = ahci_qc_fill_rtf,
395
396 .freeze = ahci_freeze,
397 .thaw = ahci_thaw,
398 .softreset = ahci_softreset,
399 .hardreset = ahci_hardreset,
400 .postreset = ahci_postreset,
401 .pmp_softreset = ahci_softreset,
402 .error_handler = ahci_error_handler,
403 .post_internal_cmd = ahci_post_internal_cmd,
404 .dev_config = ahci_dev_config,
405
406 .scr_read = ahci_scr_read,
407 .scr_write = ahci_scr_write,
408 .pmp_attach = ahci_pmp_attach,
409 .pmp_detach = ahci_pmp_detach,
410
411 .enable_pm = ahci_enable_alpm,
412 .disable_pm = ahci_disable_alpm,
413 .em_show = ahci_led_show,
414 .em_store = ahci_led_store,
415 .sw_activity_show = ahci_activity_show,
416 .sw_activity_store = ahci_activity_store,
417 #ifdef CONFIG_PM
418 .port_suspend = ahci_port_suspend,
419 .port_resume = ahci_port_resume,
420 #endif
421 .port_start = ahci_port_start,
422 .port_stop = ahci_port_stop,
423 };
424
425 static struct ata_port_operations ahci_vt8251_ops = {
426 .inherits = &ahci_ops,
427 .hardreset = ahci_vt8251_hardreset,
428 };
429
430 static struct ata_port_operations ahci_p5wdh_ops = {
431 .inherits = &ahci_ops,
432 .hardreset = ahci_p5wdh_hardreset,
433 };
434
435 static struct ata_port_operations ahci_sb600_ops = {
436 .inherits = &ahci_ops,
437 .softreset = ahci_sb600_softreset,
438 .pmp_softreset = ahci_sb600_softreset,
439 };
440
441 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
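/* stash AHCI_HFLAG_* bits in ata_port_info.private_data; the probe
 * path is expected to fold them into ahci_host_priv.flags.
 */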
442
443 static const struct ata_port_info ahci_port_info[] = {
444 [board_ahci] =
445 {
446 .flags = AHCI_FLAG_COMMON,
447 .pio_mask = ATA_PIO4,
448 .udma_mask = ATA_UDMA6,
449 .port_ops = &ahci_ops,
450 },
451 [board_ahci_vt8251] =
452 {
453 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
454 .flags = AHCI_FLAG_COMMON,
455 .pio_mask = ATA_PIO4,
456 .udma_mask = ATA_UDMA6,
457 .port_ops = &ahci_vt8251_ops,
458 },
459 [board_ahci_ign_iferr] =
460 {
461 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
462 .flags = AHCI_FLAG_COMMON,
463 .pio_mask = ATA_PIO4,
464 .udma_mask = ATA_UDMA6,
465 .port_ops = &ahci_ops,
466 },
467 [board_ahci_sb600] =
468 {
469 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
470 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
471 AHCI_HFLAG_32BIT_ONLY),
472 .flags = AHCI_FLAG_COMMON,
473 .pio_mask = ATA_PIO4,
474 .udma_mask = ATA_UDMA6,
475 .port_ops = &ahci_sb600_ops,
476 },
477 [board_ahci_mv] =
478 {
479 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
480 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
481 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
482 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
483 .pio_mask = ATA_PIO4,
484 .udma_mask = ATA_UDMA6,
485 .port_ops = &ahci_ops,
486 },
487 [board_ahci_sb700] = /* for SB700 and SB800 */
488 {
489 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
490 .flags = AHCI_FLAG_COMMON,
491 .pio_mask = ATA_PIO4,
492 .udma_mask = ATA_UDMA6,
493 .port_ops = &ahci_sb600_ops,
494 },
495 [board_ahci_mcp65] =
496 {
497 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
498 .flags = AHCI_FLAG_COMMON,
499 .pio_mask = ATA_PIO4,
500 .udma_mask = ATA_UDMA6,
501 .port_ops = &ahci_ops,
502 },
503 [board_ahci_nopmp] =
504 {
505 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
506 .flags = AHCI_FLAG_COMMON,
507 .pio_mask = ATA_PIO4,
508 .udma_mask = ATA_UDMA6,
509 .port_ops = &ahci_ops,
510 },
511 /* board_ahci_yesncq */
512 {
513 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
514 .flags = AHCI_FLAG_COMMON,
515 .pio_mask = ATA_PIO4,
516 .udma_mask = ATA_UDMA6,
517 .port_ops = &ahci_ops,
518 },
519 };
520
521 static const struct pci_device_id ahci_pci_tbl[] = {
522 /* Intel */
523 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
524 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
525 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
526 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
527 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
528 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
529 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
530 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
531 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
532 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
533 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
534 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
535 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
536 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
537 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
538 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
539 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
540 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
541 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
542 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
543 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
544 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
545 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
546 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
547 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
548 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
549 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
550 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
551 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
552 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
553 { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
554 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
555 { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
556 { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
557 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
558 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
559 { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
560 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
561 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
562 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
563
564 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
565 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
566 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
567
568 /* ATI */
569 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
570 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
571 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
572 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
573 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
574 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
575 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
576
577 /* AMD */
578 { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
579 /* AMD is using RAID class only for ahci controllers */
580 { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
581 PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
582
583 /* VIA */
584 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
585 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
586
587 /* NVIDIA */
588 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
589 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
590 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
591 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
592 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
593 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
594 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
595 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
596 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
597 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
598 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
599 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
600 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
601 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
602 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
603 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
604 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
605 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
606 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
607 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
608 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
609 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
610 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
611 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
612 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
613 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
614 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
615 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
616 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
617 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
618 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
619 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
620 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
621 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
622 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
623 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
624 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
625 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
626 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
627 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
628 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
629 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
630 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
631 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
632 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
633 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
634 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
635 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
636 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
637 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
638 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
639 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
640 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
641 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
642 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
643 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
644 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
645 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
646 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
647 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
648 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
649 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
650 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
651 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
652 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
653 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
654 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
655 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
656 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
657
658 /* SiS */
659 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
660 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
661 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
662
663 /* Marvell */
664 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
665 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
666
667 /* Promise */
668 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
669
670 /* Generic, PCI class code for AHCI */
671 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
672 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
673
674 { } /* terminate list */
675 };
676
677
678 static struct pci_driver ahci_pci_driver = {
679 .name = DRV_NAME,
680 .id_table = ahci_pci_tbl,
681 .probe = ahci_init_one,
682 .remove = ata_pci_remove_one,
683 #ifdef CONFIG_PM
684 .suspend = ahci_pci_device_suspend,
685 .resume = ahci_pci_device_resume,
686 #endif
687 };
688
689 static int ahci_em_messages = 1;
690 module_param(ahci_em_messages, int, 0444);
691 /* add other LED protocol types when they become supported */
692 MODULE_PARM_DESC(ahci_em_messages,
693 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED)");
694
695 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
696 static int marvell_enable;
697 #else
698 static int marvell_enable = 1;
699 #endif
700 module_param(marvell_enable, int, 0644);
701 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
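/*
 * The default tracks the #if above: with the PATA_MARVELL driver
 * configured, marvell_enable defaults to 0 so Marvell parts are left
 * alone, otherwise it defaults to 1 and their SATA ports are driven
 * through AHCI (see also the MV_AHCI hack further down).
 */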
702
703
704 static inline int ahci_nr_ports(u32 cap)
705 {
706 return (cap & 0x1f) + 1;
707 }
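/* CAP.NP (bits 0:4) is zero based, e.g. a field value of 3 means four ports */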
708
709 static inline void __iomem *__ahci_port_base(struct ata_host *host,
710 unsigned int port_no)
711 {
712 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
713
714 return mmio + 0x100 + (port_no * 0x80);
715 }
716
717 static inline void __iomem *ahci_port_base(struct ata_port *ap)
718 {
719 return __ahci_port_base(ap->host, ap->port_no);
720 }
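/*
 * Register layout behind BAR 5: the global registers above occupy the
 * first 0x100 bytes, then each port gets its own 0x80 byte block, so
 * e.g. port 2 sits at offset 0x100 + 2 * 0x80 = 0x200.
 */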
721
722 static void ahci_enable_ahci(void __iomem *mmio)
723 {
724 int i;
725 u32 tmp;
726
727 /* turn on AHCI_EN */
728 tmp = readl(mmio + HOST_CTL);
729 if (tmp & HOST_AHCI_EN)
730 return;
731
732 /* Some controllers need AHCI_EN to be written multiple times.
733 * Try a few times before giving up.
734 */
735 for (i = 0; i < 5; i++) {
736 tmp |= HOST_AHCI_EN;
737 writel(tmp, mmio + HOST_CTL);
738 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
739 if (tmp & HOST_AHCI_EN)
740 return;
741 msleep(10);
742 }
743
744 WARN_ON(1);
745 }
746
747 static ssize_t ahci_show_host_caps(struct device *dev,
748 struct device_attribute *attr, char *buf)
749 {
750 struct Scsi_Host *shost = class_to_shost(dev);
751 struct ata_port *ap = ata_shost_to_port(shost);
752 struct ahci_host_priv *hpriv = ap->host->private_data;
753
754 return sprintf(buf, "%x\n", hpriv->cap);
755 }
756
757 static ssize_t ahci_show_host_cap2(struct device *dev,
758 struct device_attribute *attr, char *buf)
759 {
760 struct Scsi_Host *shost = class_to_shost(dev);
761 struct ata_port *ap = ata_shost_to_port(shost);
762 struct ahci_host_priv *hpriv = ap->host->private_data;
763
764 return sprintf(buf, "%x\n", hpriv->cap2);
765 }
766
767 static ssize_t ahci_show_host_version(struct device *dev,
768 struct device_attribute *attr, char *buf)
769 {
770 struct Scsi_Host *shost = class_to_shost(dev);
771 struct ata_port *ap = ata_shost_to_port(shost);
772 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
773
774 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
775 }
776
777 static ssize_t ahci_show_port_cmd(struct device *dev,
778 struct device_attribute *attr, char *buf)
779 {
780 struct Scsi_Host *shost = class_to_shost(dev);
781 struct ata_port *ap = ata_shost_to_port(shost);
782 void __iomem *port_mmio = ahci_port_base(ap);
783
784 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
785 }
786
787 /**
788 * ahci_save_initial_config - Save and fixup initial config values
789 * @pdev: target PCI device
790 * @hpriv: host private area to store config values
791 *
792 * Some registers containing configuration info might be setup by
793 * BIOS and might be cleared on reset. This function saves the
794 * initial values of those registers into @hpriv such that they
795 * can be restored after controller reset.
796 *
797 * If inconsistent, config values are fixed up by this function.
798 *
799 * LOCKING:
800 * None.
801 */
802 static void ahci_save_initial_config(struct pci_dev *pdev,
803 struct ahci_host_priv *hpriv)
804 {
805 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
806 u32 cap, cap2, vers, port_map;
807 int i;
808 int mv;
809
810 /* make sure AHCI mode is enabled before accessing CAP */
811 ahci_enable_ahci(mmio);
812
813 /* Values prefixed with saved_ are written back to host after
814 * reset. Values without are used for driver operation.
815 */
816 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
817 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
818
819 /* CAP2 register is only defined for AHCI 1.2 and later */
820 vers = readl(mmio + HOST_VERSION);
821 if ((vers >> 16) > 1 ||
822 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
823 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
824 else
825 hpriv->saved_cap2 = cap2 = 0;
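/*
 * HOST_VERSION keeps the major version in the upper 16 bits and the
 * minor one in the lower 16, so e.g. 0x00010200 is AHCI 1.2 and
 * passes the check above.
 */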
826
827 /* some chips have errata preventing 64bit use */
828 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
829 dev_printk(KERN_INFO, &pdev->dev,
830 "controller can't do 64bit DMA, forcing 32bit\n");
831 cap &= ~HOST_CAP_64;
832 }
833
834 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
835 dev_printk(KERN_INFO, &pdev->dev,
836 "controller can't do NCQ, turning off CAP_NCQ\n");
837 cap &= ~HOST_CAP_NCQ;
838 }
839
840 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
841 dev_printk(KERN_INFO, &pdev->dev,
842 "controller can do NCQ, turning on CAP_NCQ\n");
843 cap |= HOST_CAP_NCQ;
844 }
845
846 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
847 dev_printk(KERN_INFO, &pdev->dev,
848 "controller can't do PMP, turning off CAP_PMP\n");
849 cap &= ~HOST_CAP_PMP;
850 }
851
852 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
853 port_map != 1) {
854 dev_printk(KERN_INFO, &pdev->dev,
855 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
856 port_map, 1);
857 port_map = 1;
858 }
859
860 /*
861 * Temporary Marvell 6145 hack: PATA port presence
862 * is asserted through the standard AHCI port
863 * presence register, as bit 4 (counting from 0)
864 */
865 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
866 if (pdev->device == 0x6121)
867 mv = 0x3;
868 else
869 mv = 0xf;
870 dev_printk(KERN_ERR, &pdev->dev,
871 "MV_AHCI HACK: port_map %x -> %x\n",
872 port_map,
873 port_map & mv);
874 dev_printk(KERN_ERR, &pdev->dev,
875 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
876
877 port_map &= mv;
878 }
879
880 /* cross check port_map and cap.n_ports */
881 if (port_map) {
882 int map_ports = 0;
883
884 for (i = 0; i < AHCI_MAX_PORTS; i++)
885 if (port_map & (1 << i))
886 map_ports++;
887
888 /* If PI has more ports than n_ports, whine, clear
889 * port_map and let it be generated from n_ports.
890 */
891 if (map_ports > ahci_nr_ports(cap)) {
892 dev_printk(KERN_WARNING, &pdev->dev,
893 "implemented port map (0x%x) contains more "
894 "ports than nr_ports (%u), using nr_ports\n",
895 port_map, ahci_nr_ports(cap));
896 port_map = 0;
897 }
898 }
899
900 /* fabricate port_map from cap.nr_ports */
901 if (!port_map) {
902 port_map = (1 << ahci_nr_ports(cap)) - 1;
903 dev_printk(KERN_WARNING, &pdev->dev,
904 "forcing PORTS_IMPL to 0x%x\n", port_map);
905
906 /* write the fixed up value to the PI register */
907 hpriv->saved_port_map = port_map;
908 }
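/*
 * e.g. a controller reporting CAP.NP == 3 but a zero PI register ends
 * up with port_map = (1 << 4) - 1 = 0xf here.
 */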
909
910 /* record values to use during operation */
911 hpriv->cap = cap;
912 hpriv->cap2 = cap2;
913 hpriv->port_map = port_map;
914 }
915
916 /**
917 * ahci_restore_initial_config - Restore initial config
918 * @host: target ATA host
919 *
920 * Restore initial config stored by ahci_save_initial_config().
921 *
922 * LOCKING:
923 * None.
924 */
925 static void ahci_restore_initial_config(struct ata_host *host)
926 {
927 struct ahci_host_priv *hpriv = host->private_data;
928 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
929
930 writel(hpriv->saved_cap, mmio + HOST_CAP);
931 if (hpriv->saved_cap2)
932 writel(hpriv->saved_cap2, mmio + HOST_CAP2);
933 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
934 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
935 }
936
937 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
938 {
939 static const int offset[] = {
940 [SCR_STATUS] = PORT_SCR_STAT,
941 [SCR_CONTROL] = PORT_SCR_CTL,
942 [SCR_ERROR] = PORT_SCR_ERR,
943 [SCR_ACTIVE] = PORT_SCR_ACT,
944 [SCR_NOTIFICATION] = PORT_SCR_NTF,
945 };
946 struct ahci_host_priv *hpriv = ap->host->private_data;
947
948 if (sc_reg < ARRAY_SIZE(offset) &&
949 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
950 return offset[sc_reg];
951 return 0;
952 }
953
954 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
955 {
956 void __iomem *port_mmio = ahci_port_base(link->ap);
957 int offset = ahci_scr_offset(link->ap, sc_reg);
958
959 if (offset) {
960 *val = readl(port_mmio + offset);
961 return 0;
962 }
963 return -EINVAL;
964 }
965
966 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
967 {
968 void __iomem *port_mmio = ahci_port_base(link->ap);
969 int offset = ahci_scr_offset(link->ap, sc_reg);
970
971 if (offset) {
972 writel(val, port_mmio + offset);
973 return 0;
974 }
975 return -EINVAL;
976 }
977
978 static void ahci_start_engine(struct ata_port *ap)
979 {
980 void __iomem *port_mmio = ahci_port_base(ap);
981 u32 tmp;
982
983 /* start DMA */
984 tmp = readl(port_mmio + PORT_CMD);
985 tmp |= PORT_CMD_START;
986 writel(tmp, port_mmio + PORT_CMD);
987 readl(port_mmio + PORT_CMD); /* flush */
988 }
989
990 static int ahci_stop_engine(struct ata_port *ap)
991 {
992 void __iomem *port_mmio = ahci_port_base(ap);
993 u32 tmp;
994
995 tmp = readl(port_mmio + PORT_CMD);
996
997 /* check if the HBA is idle */
998 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
999 return 0;
1000
1001 /* setting HBA to idle */
1002 tmp &= ~PORT_CMD_START;
1003 writel(tmp, port_mmio + PORT_CMD);
1004
1005 /* wait for engine to stop. This could be as long as 500 msec */
1006 tmp = ata_wait_register(port_mmio + PORT_CMD,
1007 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
1008 if (tmp & PORT_CMD_LIST_ON)
1009 return -EIO;
1010
1011 return 0;
1012 }
1013
1014 static void ahci_start_fis_rx(struct ata_port *ap)
1015 {
1016 void __iomem *port_mmio = ahci_port_base(ap);
1017 struct ahci_host_priv *hpriv = ap->host->private_data;
1018 struct ahci_port_priv *pp = ap->private_data;
1019 u32 tmp;
1020
1021 /* set FIS registers */
1022 if (hpriv->cap & HOST_CAP_64)
1023 writel((pp->cmd_slot_dma >> 16) >> 16,
1024 port_mmio + PORT_LST_ADDR_HI);
1025 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
1026
1027 if (hpriv->cap & HOST_CAP_64)
1028 writel((pp->rx_fis_dma >> 16) >> 16,
1029 port_mmio + PORT_FIS_ADDR_HI);
1030 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
1031
1032 /* enable FIS reception */
1033 tmp = readl(port_mmio + PORT_CMD);
1034 tmp |= PORT_CMD_FIS_RX;
1035 writel(tmp, port_mmio + PORT_CMD);
1036
1037 /* flush */
1038 readl(port_mmio + PORT_CMD);
1039 }
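/*
 * The "(dma >> 16) >> 16" above (and in ahci_fill_cmd_slot()) is the
 * usual trick for taking the upper 32 bits without a shift-count
 * warning when dma_addr_t is only 32 bits wide.
 */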
1040
1041 static int ahci_stop_fis_rx(struct ata_port *ap)
1042 {
1043 void __iomem *port_mmio = ahci_port_base(ap);
1044 u32 tmp;
1045
1046 /* disable FIS reception */
1047 tmp = readl(port_mmio + PORT_CMD);
1048 tmp &= ~PORT_CMD_FIS_RX;
1049 writel(tmp, port_mmio + PORT_CMD);
1050
1051 /* wait for completion, spec says 500ms, give it 1000 */
1052 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1053 PORT_CMD_FIS_ON, 10, 1000);
1054 if (tmp & PORT_CMD_FIS_ON)
1055 return -EBUSY;
1056
1057 return 0;
1058 }
1059
1060 static void ahci_power_up(struct ata_port *ap)
1061 {
1062 struct ahci_host_priv *hpriv = ap->host->private_data;
1063 void __iomem *port_mmio = ahci_port_base(ap);
1064 u32 cmd;
1065
1066 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1067
1068 /* spin up device */
1069 if (hpriv->cap & HOST_CAP_SSS) {
1070 cmd |= PORT_CMD_SPIN_UP;
1071 writel(cmd, port_mmio + PORT_CMD);
1072 }
1073
1074 /* wake up link */
1075 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
1076 }
1077
1078 static void ahci_disable_alpm(struct ata_port *ap)
1079 {
1080 struct ahci_host_priv *hpriv = ap->host->private_data;
1081 void __iomem *port_mmio = ahci_port_base(ap);
1082 u32 cmd;
1083 struct ahci_port_priv *pp = ap->private_data;
1084
1085 /* IPM bits should be disabled by libata-core */
1086 /* get the existing command bits */
1087 cmd = readl(port_mmio + PORT_CMD);
1088
1089 /* disable ALPM and ASP */
1090 cmd &= ~PORT_CMD_ASP;
1091 cmd &= ~PORT_CMD_ALPE;
1092
1093 /* force the interface back to active */
1094 cmd |= PORT_CMD_ICC_ACTIVE;
1095
1096 /* write out new cmd value */
1097 writel(cmd, port_mmio + PORT_CMD);
1098 cmd = readl(port_mmio + PORT_CMD);
1099
1100 /* wait 10ms to be sure we've come out of any low power state */
1101 msleep(10);
1102
1103 /* clear out any PhyRdy stuff from interrupt status */
1104 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1105
1106 /* go ahead and clean out PhyRdy Change from Serror too */
1107 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1108
1109 /*
1110 * Clear flag to indicate that we should ignore all PhyRdy
1111 * state changes
1112 */
1113 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1114
1115 /*
1116 * Enable interrupts on Phy Ready.
1117 */
1118 pp->intr_mask |= PORT_IRQ_PHYRDY;
1119 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1120
1121 /*
1122 * don't change the link pm policy - we can be called
1123 * just to turn of link pm temporarily
1124 */
1125 }
1126
1127 static int ahci_enable_alpm(struct ata_port *ap,
1128 enum link_pm policy)
1129 {
1130 struct ahci_host_priv *hpriv = ap->host->private_data;
1131 void __iomem *port_mmio = ahci_port_base(ap);
1132 u32 cmd;
1133 struct ahci_port_priv *pp = ap->private_data;
1134 u32 asp;
1135
1136 /* Make sure the host is capable of link power management */
1137 if (!(hpriv->cap & HOST_CAP_ALPM))
1138 return -EINVAL;
1139
1140 switch (policy) {
1141 case MAX_PERFORMANCE:
1142 case NOT_AVAILABLE:
1143 /*
1144 * if we came here with NOT_AVAILABLE,
1145 * it just means this is the first time we
1146 * have tried to enable - default to max performance,
1147 * and let the user go to lower power modes on request.
1148 */
1149 ahci_disable_alpm(ap);
1150 return 0;
1151 case MIN_POWER:
1152 /* configure HBA to enter SLUMBER */
1153 asp = PORT_CMD_ASP;
1154 break;
1155 case MEDIUM_POWER:
1156 /* configure HBA to enter PARTIAL */
1157 asp = 0;
1158 break;
1159 default:
1160 return -EINVAL;
1161 }
1162
1163 /*
1164 * Disable interrupts on Phy Ready. This keeps us from
1165 * getting woken up due to spurious phy ready interrupts
1166 * TBD - Hot plug should be done via polling now, is
1167 * that even supported?
1168 */
1169 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1170 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1171
1172 /*
1173 * Set a flag to indicate that we should ignore all PhyRdy
1174 * state changes since these can happen now whenever we
1175 * change link state
1176 */
1177 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1178
1179 /* get the existing command bits */
1180 cmd = readl(port_mmio + PORT_CMD);
1181
1182 /*
1183 * Set ASP based on Policy
1184 */
1185 cmd |= asp;
1186
1187 /*
1188 * Setting this bit will instruct the HBA to aggressively
1189 * enter a lower power link state when it's appropriate and
1190 * based on the value set above for ASP
1191 */
1192 cmd |= PORT_CMD_ALPE;
1193
1194 /* write out new cmd value */
1195 writel(cmd, port_mmio + PORT_CMD);
1196 cmd = readl(port_mmio + PORT_CMD);
1197
1198 /* IPM bits should be set by libata-core */
1199 return 0;
1200 }
1201
1202 #ifdef CONFIG_PM
1203 static void ahci_power_down(struct ata_port *ap)
1204 {
1205 struct ahci_host_priv *hpriv = ap->host->private_data;
1206 void __iomem *port_mmio = ahci_port_base(ap);
1207 u32 cmd, scontrol;
1208
1209 if (!(hpriv->cap & HOST_CAP_SSS))
1210 return;
1211
1212 /* put device into listen mode, first set PxSCTL.DET to 0 */
1213 scontrol = readl(port_mmio + PORT_SCR_CTL);
1214 scontrol &= ~0xf;
1215 writel(scontrol, port_mmio + PORT_SCR_CTL);
1216
1217 /* then set PxCMD.SUD to 0 */
1218 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1219 cmd &= ~PORT_CMD_SPIN_UP;
1220 writel(cmd, port_mmio + PORT_CMD);
1221 }
1222 #endif
1223
1224 static void ahci_start_port(struct ata_port *ap)
1225 {
1226 struct ahci_port_priv *pp = ap->private_data;
1227 struct ata_link *link;
1228 struct ahci_em_priv *emp;
1229 ssize_t rc;
1230 int i;
1231
1232 /* enable FIS reception */
1233 ahci_start_fis_rx(ap);
1234
1235 /* enable DMA */
1236 ahci_start_engine(ap);
1237
1238 /* turn on LEDs */
1239 if (ap->flags & ATA_FLAG_EM) {
1240 ata_for_each_link(link, ap, EDGE) {
1241 emp = &pp->em_priv[link->pmp];
1242
1243 /* EM Transmit bit may be busy during init */
1244 for (i = 0; i < EM_MAX_RETRY; i++) {
1245 rc = ahci_transmit_led_message(ap,
1246 emp->led_state,
1247 4);
1248 if (rc == -EBUSY)
1249 msleep(1);
1250 else
1251 break;
1252 }
1253 }
1254 }
1255
1256 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1257 ata_for_each_link(link, ap, EDGE)
1258 ahci_init_sw_activity(link);
1259
1260 }
1261
1262 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1263 {
1264 int rc;
1265
1266 /* disable DMA */
1267 rc = ahci_stop_engine(ap);
1268 if (rc) {
1269 *emsg = "failed to stop engine";
1270 return rc;
1271 }
1272
1273 /* disable FIS reception */
1274 rc = ahci_stop_fis_rx(ap);
1275 if (rc) {
1276 *emsg = "failed to stop FIS RX";
1277 return rc;
1278 }
1279
1280 return 0;
1281 }
1282
1283 static int ahci_reset_controller(struct ata_host *host)
1284 {
1285 struct pci_dev *pdev = to_pci_dev(host->dev);
1286 struct ahci_host_priv *hpriv = host->private_data;
1287 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1288 u32 tmp;
1289
1290 /* we must be in AHCI mode, before using anything
1291 * AHCI-specific, such as HOST_RESET.
1292 */
1293 ahci_enable_ahci(mmio);
1294
1295 /* global controller reset */
1296 if (!ahci_skip_host_reset) {
1297 tmp = readl(mmio + HOST_CTL);
1298 if ((tmp & HOST_RESET) == 0) {
1299 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1300 readl(mmio + HOST_CTL); /* flush */
1301 }
1302
1303 /*
1304 * to perform host reset, OS should set HOST_RESET
1305 * and poll until this bit is read to be "0".
1306 * reset must complete within 1 second, or
1307 * the hardware should be considered fried.
1308 */
1309 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1310 HOST_RESET, 10, 1000);
1311
1312 if (tmp & HOST_RESET) {
1313 dev_printk(KERN_ERR, host->dev,
1314 "controller reset failed (0x%x)\n", tmp);
1315 return -EIO;
1316 }
1317
1318 /* turn on AHCI mode */
1319 ahci_enable_ahci(mmio);
1320
1321 /* Some registers might be cleared on reset. Restore
1322 * initial values.
1323 */
1324 ahci_restore_initial_config(host);
1325 } else
1326 dev_printk(KERN_INFO, host->dev,
1327 "skipping global host reset\n");
1328
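/*
 * On Intel parts, config offset 0x92 is the Port Control and Status
 * (PCS) register; its low bits enable individual ports, so OR in
 * port_map to make sure every implemented port is enabled after the
 * reset above.
 */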
1329 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1330 u16 tmp16;
1331
1332 /* configure PCS */
1333 pci_read_config_word(pdev, 0x92, &tmp16);
1334 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1335 tmp16 |= hpriv->port_map;
1336 pci_write_config_word(pdev, 0x92, tmp16);
1337 }
1338 }
1339
1340 return 0;
1341 }
1342
1343 static void ahci_sw_activity(struct ata_link *link)
1344 {
1345 struct ata_port *ap = link->ap;
1346 struct ahci_port_priv *pp = ap->private_data;
1347 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1348
1349 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1350 return;
1351
1352 emp->activity++;
1353 if (!timer_pending(&emp->timer))
1354 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1355 }
1356
1357 static void ahci_sw_activity_blink(unsigned long arg)
1358 {
1359 struct ata_link *link = (struct ata_link *)arg;
1360 struct ata_port *ap = link->ap;
1361 struct ahci_port_priv *pp = ap->private_data;
1362 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1363 unsigned long led_message = emp->led_state;
1364 u32 activity_led_state;
1365 unsigned long flags;
1366
1367 led_message &= EM_MSG_LED_VALUE;
1368 led_message |= ap->port_no | (link->pmp << 8);
1369
1370 /* check to see if we've had activity. If so,
1371 * toggle state of LED and reset timer. If not,
1372 * turn LED to desired idle state.
1373 */
1374 spin_lock_irqsave(ap->lock, flags);
1375 if (emp->saved_activity != emp->activity) {
1376 emp->saved_activity = emp->activity;
1377 /* get the current LED state */
1378 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1379
1380 if (activity_led_state)
1381 activity_led_state = 0;
1382 else
1383 activity_led_state = 1;
1384
1385 /* clear old state */
1386 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1387
1388 /* toggle state */
1389 led_message |= (activity_led_state << 16);
1390 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1391 } else {
1392 /* switch to idle */
1393 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1394 if (emp->blink_policy == BLINK_OFF)
1395 led_message |= (1 << 16);
1396 }
1397 spin_unlock_irqrestore(ap->lock, flags);
1398 ahci_transmit_led_message(ap, led_message, 4);
1399 }
1400
1401 static void ahci_init_sw_activity(struct ata_link *link)
1402 {
1403 struct ata_port *ap = link->ap;
1404 struct ahci_port_priv *pp = ap->private_data;
1405 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1406
1407 /* init activity stats, setup timer */
1408 emp->saved_activity = emp->activity = 0;
1409 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1410
1411 /* check our blink policy and set flag for link if it's enabled */
1412 if (emp->blink_policy)
1413 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1414 }
1415
1416 static int ahci_reset_em(struct ata_host *host)
1417 {
1418 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1419 u32 em_ctl;
1420
1421 em_ctl = readl(mmio + HOST_EM_CTL);
1422 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1423 return -EINVAL;
1424
1425 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1426 return 0;
1427 }
1428
1429 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1430 ssize_t size)
1431 {
1432 struct ahci_host_priv *hpriv = ap->host->private_data;
1433 struct ahci_port_priv *pp = ap->private_data;
1434 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1435 u32 em_ctl;
1436 u32 message[] = {0, 0};
1437 unsigned long flags;
1438 int pmp;
1439 struct ahci_em_priv *emp;
1440
1441 /* get the slot number from the message */
1442 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1443 if (pmp < EM_MAX_SLOTS)
1444 emp = &pp->em_priv[pmp];
1445 else
1446 return -EINVAL;
1447
1448 spin_lock_irqsave(ap->lock, flags);
1449
1450 /*
1451 * if we are still busy transmitting a previous message,
1452 * do not allow another message to be started
1453 */
1454 em_ctl = readl(mmio + HOST_EM_CTL);
1455 if (em_ctl & EM_CTL_TM) {
1456 spin_unlock_irqrestore(ap->lock, flags);
1457 return -EBUSY;
1458 }
1459
1460 /*
1461 * create message header - this is all zero except for
1462 * the message size, which is 4 bytes.
1463 */
1464 message[0] |= (4 << 8);
1465
1466 /* ignore 0:4 of byte zero, fill in port info yourself */
1467 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1468
1469 /* write message to EM_LOC */
1470 writel(message[0], mmio + hpriv->em_loc);
1471 writel(message[1], mmio + hpriv->em_loc+4);
1472
1473 /* save off new led state for port/slot */
1474 emp->led_state = state;
1475
1476 /*
1477 * tell hardware to transmit the message
1478 */
1479 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1480
1481 spin_unlock_irqrestore(ap->lock, flags);
1482 return size;
1483 }
1484
1485 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1486 {
1487 struct ahci_port_priv *pp = ap->private_data;
1488 struct ata_link *link;
1489 struct ahci_em_priv *emp;
1490 int rc = 0;
1491
1492 ata_for_each_link(link, ap, EDGE) {
1493 emp = &pp->em_priv[link->pmp];
1494 rc += sprintf(buf, "%lx\n", emp->led_state);
1495 }
1496 return rc;
1497 }
1498
1499 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1500 size_t size)
1501 {
1502 int state;
1503 int pmp;
1504 struct ahci_port_priv *pp = ap->private_data;
1505 struct ahci_em_priv *emp;
1506
1507 state = simple_strtoul(buf, NULL, 0);
1508
1509 /* get the slot number from the message */
1510 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1511 if (pmp < EM_MAX_SLOTS)
1512 emp = &pp->em_priv[pmp];
1513 else
1514 return -EINVAL;
1515
1516 /* mask off the activity bits if we are in sw_activity
1517 * mode, user should turn off sw_activity before setting
1518 * activity led through em_message
1519 */
1520 if (emp->blink_policy)
1521 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1522
1523 return ahci_transmit_led_message(ap, state, size);
1524 }
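/*
 * The em_message value written via sysfs is parsed as a plain number:
 * bits 8:15 select the PMP slot whose cached LED state is updated and
 * bits 16:31 carry the LED value; the HBA port field in bits 0:3 is
 * overwritten with ap->port_no by ahci_transmit_led_message() anyway.
 */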
1525
1526 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1527 {
1528 struct ata_link *link = dev->link;
1529 struct ata_port *ap = link->ap;
1530 struct ahci_port_priv *pp = ap->private_data;
1531 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1532 u32 port_led_state = emp->led_state;
1533
1534 /* save the desired Activity LED behavior */
1535 if (val == OFF) {
1536 /* clear LFLAG */
1537 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1538
1539 /* set the LED to OFF */
1540 port_led_state &= EM_MSG_LED_VALUE_OFF;
1541 port_led_state |= (ap->port_no | (link->pmp << 8));
1542 ahci_transmit_led_message(ap, port_led_state, 4);
1543 } else {
1544 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1545 if (val == BLINK_OFF) {
1546 /* set LED to ON for idle */
1547 port_led_state &= EM_MSG_LED_VALUE_OFF;
1548 port_led_state |= (ap->port_no | (link->pmp << 8));
1549 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1550 ahci_transmit_led_message(ap, port_led_state, 4);
1551 }
1552 }
1553 emp->blink_policy = val;
1554 return 0;
1555 }
1556
1557 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1558 {
1559 struct ata_link *link = dev->link;
1560 struct ata_port *ap = link->ap;
1561 struct ahci_port_priv *pp = ap->private_data;
1562 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1563
1564 /* display the saved value of activity behavior for this
1565 * disk.
1566 */
1567 return sprintf(buf, "%d\n", emp->blink_policy);
1568 }
1569
1570 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1571 int port_no, void __iomem *mmio,
1572 void __iomem *port_mmio)
1573 {
1574 const char *emsg = NULL;
1575 int rc;
1576 u32 tmp;
1577
1578 /* make sure port is not active */
1579 rc = ahci_deinit_port(ap, &emsg);
1580 if (rc)
1581 dev_printk(KERN_WARNING, &pdev->dev,
1582 "%s (%d)\n", emsg, rc);
1583
1584 /* clear SError */
1585 tmp = readl(port_mmio + PORT_SCR_ERR);
1586 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1587 writel(tmp, port_mmio + PORT_SCR_ERR);
1588
1589 /* clear port IRQ */
1590 tmp = readl(port_mmio + PORT_IRQ_STAT);
1591 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1592 if (tmp)
1593 writel(tmp, port_mmio + PORT_IRQ_STAT);
1594
1595 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1596 }
1597
1598 static void ahci_init_controller(struct ata_host *host)
1599 {
1600 struct ahci_host_priv *hpriv = host->private_data;
1601 struct pci_dev *pdev = to_pci_dev(host->dev);
1602 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1603 int i;
1604 void __iomem *port_mmio;
1605 u32 tmp;
1606 int mv;
1607
1608 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1609 if (pdev->device == 0x6121)
1610 mv = 2;
1611 else
1612 mv = 4;
1613 port_mmio = __ahci_port_base(host, mv);
1614
1615 writel(0, port_mmio + PORT_IRQ_MASK);
1616
1617 /* clear port IRQ */
1618 tmp = readl(port_mmio + PORT_IRQ_STAT);
1619 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1620 if (tmp)
1621 writel(tmp, port_mmio + PORT_IRQ_STAT);
1622 }
1623
1624 for (i = 0; i < host->n_ports; i++) {
1625 struct ata_port *ap = host->ports[i];
1626
1627 port_mmio = ahci_port_base(ap);
1628 if (ata_port_is_dummy(ap))
1629 continue;
1630
1631 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1632 }
1633
1634 tmp = readl(mmio + HOST_CTL);
1635 VPRINTK("HOST_CTL 0x%x\n", tmp);
1636 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1637 tmp = readl(mmio + HOST_CTL);
1638 VPRINTK("HOST_CTL 0x%x\n", tmp);
1639 }
1640
1641 static void ahci_dev_config(struct ata_device *dev)
1642 {
1643 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1644
1645 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1646 dev->max_sectors = 255;
1647 ata_dev_printk(dev, KERN_INFO,
1648 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1649 }
1650 }
1651
1652 static unsigned int ahci_dev_classify(struct ata_port *ap)
1653 {
1654 void __iomem *port_mmio = ahci_port_base(ap);
1655 struct ata_taskfile tf;
1656 u32 tmp;
1657
1658 tmp = readl(port_mmio + PORT_SIG);
1659 tf.lbah = (tmp >> 24) & 0xff;
1660 tf.lbam = (tmp >> 16) & 0xff;
1661 tf.lbal = (tmp >> 8) & 0xff;
1662 tf.nsect = (tmp) & 0xff;
1663
1664 return ata_dev_classify(&tf);
1665 }
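/*
 * The signature assembled above is the initial D2H FIS contents, e.g.
 * 0x00000101 for a plain ATA disk, 0xEB140101 for ATAPI and
 * 0x96690101 for a port multiplier (see ata_dev_classify()).
 */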
1666
1667 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1668 u32 opts)
1669 {
1670 dma_addr_t cmd_tbl_dma;
1671
1672 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1673
1674 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1675 pp->cmd_slot[tag].status = 0;
1676 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1677 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1678 }
1679
1680 static int ahci_kick_engine(struct ata_port *ap)
1681 {
1682 void __iomem *port_mmio = ahci_port_base(ap);
1683 struct ahci_host_priv *hpriv = ap->host->private_data;
1684 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1685 u32 tmp;
1686 int busy, rc;
1687
1688 /* stop engine */
1689 rc = ahci_stop_engine(ap);
1690 if (rc)
1691 goto out_restart;
1692
1693 /* need to do CLO?
1694 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1695 */
1696 busy = status & (ATA_BUSY | ATA_DRQ);
1697 if (!busy && !sata_pmp_attached(ap)) {
1698 rc = 0;
1699 goto out_restart;
1700 }
1701
1702 if (!(hpriv->cap & HOST_CAP_CLO)) {
1703 rc = -EOPNOTSUPP;
1704 goto out_restart;
1705 }
1706
1707 /* perform CLO */
1708 tmp = readl(port_mmio + PORT_CMD);
1709 tmp |= PORT_CMD_CLO;
1710 writel(tmp, port_mmio + PORT_CMD);
1711
1712 rc = 0;
1713 tmp = ata_wait_register(port_mmio + PORT_CMD,
1714 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1715 if (tmp & PORT_CMD_CLO)
1716 rc = -EIO;
1717
1718 /* restart engine */
1719 out_restart:
1720 ahci_start_engine(ap);
1721 return rc;
1722 }
1723
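/* Build a register FIS from @tf in command slot 0 and issue it.  If
 * @timeout_msec is non-zero, poll PORT_CMD_ISSUE for completion and
 * kick the engine on timeout (-EBUSY); with a zero timeout the issue
 * write is only flushed, not waited for.
 */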
1724 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1725 struct ata_taskfile *tf, int is_cmd, u16 flags,
1726 unsigned long timeout_msec)
1727 {
1728 const u32 cmd_fis_len = 5; /* five dwords */
1729 struct ahci_port_priv *pp = ap->private_data;
1730 void __iomem *port_mmio = ahci_port_base(ap);
1731 u8 *fis = pp->cmd_tbl;
1732 u32 tmp;
1733
1734 /* prep the command */
1735 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1736 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1737
1738 /* issue & wait */
1739 writel(1, port_mmio + PORT_CMD_ISSUE);
1740
1741 if (timeout_msec) {
1742 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1743 1, timeout_msec);
1744 if (tmp & 0x1) {
1745 ahci_kick_engine(ap);
1746 return -EBUSY;
1747 }
1748 } else
1749 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1750
1751 return 0;
1752 }
1753
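/* Common softreset helper: kick the engine (AHCI-1.1 10.4.1), issue
 * the two SRST H2D Register FISes (ATA_SRST set, then cleared), wait
 * for readiness via @check_ready and classify the attached device.
 * On controllers flagged AHCI_HFLAG_SRST_TOUT_IS_OFFLINE a readiness
 * timeout is treated as the link being offline.
 */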
1754 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1755 int pmp, unsigned long deadline,
1756 int (*check_ready)(struct ata_link *link))
1757 {
1758 struct ata_port *ap = link->ap;
1759 struct ahci_host_priv *hpriv = ap->host->private_data;
1760 const char *reason = NULL;
1761 unsigned long now, msecs;
1762 struct ata_taskfile tf;
1763 int rc;
1764
1765 DPRINTK("ENTER\n");
1766
1767 /* prepare for SRST (AHCI-1.1 10.4.1) */
1768 rc = ahci_kick_engine(ap);
1769 if (rc && rc != -EOPNOTSUPP)
1770 ata_link_printk(link, KERN_WARNING,
1771 "failed to reset engine (errno=%d)\n", rc);
1772
1773 ata_tf_init(link->device, &tf);
1774
1775 	/* issue the first H2D Register FIS */
1776 msecs = 0;
1777 now = jiffies;
1778 	if (time_after(deadline, now))
1779 msecs = jiffies_to_msecs(deadline - now);
1780
1781 tf.ctl |= ATA_SRST;
1782 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1783 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1784 rc = -EIO;
1785 reason = "1st FIS failed";
1786 goto fail;
1787 }
1788
1789 /* spec says at least 5us, but be generous and sleep for 1ms */
1790 msleep(1);
1791
1792 	/* issue the second H2D Register FIS */
1793 tf.ctl &= ~ATA_SRST;
1794 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1795
1796 /* wait for link to become ready */
1797 rc = ata_wait_after_reset(link, deadline, check_ready);
1798 if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1799 /*
1800 * Workaround for cases where link online status can't
1801 * be trusted. Treat device readiness timeout as link
1802 * offline.
1803 */
1804 ata_link_printk(link, KERN_INFO,
1805 "device not ready, treating as offline\n");
1806 *class = ATA_DEV_NONE;
1807 } else if (rc) {
1808 /* link occupied, -ENODEV too is an error */
1809 reason = "device not ready";
1810 goto fail;
1811 } else
1812 *class = ahci_dev_classify(ap);
1813
1814 DPRINTK("EXIT, class=%u\n", *class);
1815 return 0;
1816
1817 fail:
1818 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1819 return rc;
1820 }
1821
1822 static int ahci_check_ready(struct ata_link *link)
1823 {
1824 void __iomem *port_mmio = ahci_port_base(link->ap);
1825 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1826
1827 return ata_check_ready(status);
1828 }
1829
1830 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1831 unsigned long deadline)
1832 {
1833 int pmp = sata_srst_pmp(link);
1834
1835 DPRINTK("ENTER\n");
1836
1837 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1838 }
1839
1840 static int ahci_sb600_check_ready(struct ata_link *link)
1841 {
1842 void __iomem *port_mmio = ahci_port_base(link->ap);
1843 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1844 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1845
1846 /*
1847 	 * If BAD PMP is flagged (a HW bug), there is no need to check TFDATA;
1848 	 * skipping the check saves the timeout delay.
1849 */
1850 if (irq_status & PORT_IRQ_BAD_PMP)
1851 return -EIO;
1852
1853 return ata_check_ready(status);
1854 }
1855
1856 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1857 unsigned long deadline)
1858 {
1859 struct ata_port *ap = link->ap;
1860 void __iomem *port_mmio = ahci_port_base(ap);
1861 int pmp = sata_srst_pmp(link);
1862 int rc;
1863 u32 irq_sts;
1864
1865 DPRINTK("ENTER\n");
1866
1867 rc = ahci_do_softreset(link, class, pmp, deadline,
1868 ahci_sb600_check_ready);
1869
1870 /*
1871 * Soft reset fails on some ATI chips with IPMS set when PMP
1872 	 * is enabled but a SATA HDD/ODD is connected to the SATA port;
1873 	 * retry the soft reset against PMP address 0.
1874 */
1875 if (rc == -EIO) {
1876 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1877 if (irq_sts & PORT_IRQ_BAD_PMP) {
1878 ata_link_printk(link, KERN_WARNING,
1879 "applying SB600 PMP SRST workaround "
1880 "and retrying\n");
1881 rc = ahci_do_softreset(link, class, 0, deadline,
1882 ahci_check_ready);
1883 }
1884 }
1885
1886 return rc;
1887 }
1888
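/* Standard AHCI hardreset: stop the engine, seed the D2H RX area with
 * a busy status so readiness polling behaves, perform the SATA link
 * hardreset, restart the engine and classify the device if the link
 * came back online.
 */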
1889 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1890 unsigned long deadline)
1891 {
1892 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1893 struct ata_port *ap = link->ap;
1894 struct ahci_port_priv *pp = ap->private_data;
1895 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1896 struct ata_taskfile tf;
1897 bool online;
1898 int rc;
1899
1900 DPRINTK("ENTER\n");
1901
1902 ahci_stop_engine(ap);
1903
1904 /* clear D2H reception area to properly wait for D2H FIS */
1905 ata_tf_init(link->device, &tf);
1906 tf.command = 0x80;
1907 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1908
1909 rc = sata_link_hardreset(link, timing, deadline, &online,
1910 ahci_check_ready);
1911
1912 ahci_start_engine(ap);
1913
1914 if (online)
1915 *class = ahci_dev_classify(ap);
1916
1917 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1918 return rc;
1919 }
1920
1921 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1922 unsigned long deadline)
1923 {
1924 struct ata_port *ap = link->ap;
1925 bool online;
1926 int rc;
1927
1928 DPRINTK("ENTER\n");
1929
1930 ahci_stop_engine(ap);
1931
1932 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1933 deadline, &online, NULL);
1934
1935 ahci_start_engine(ap);
1936
1937 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1938
1939 /* vt8251 doesn't clear BSY on signature FIS reception,
1940 * request follow-up softreset.
1941 */
1942 return online ? -EAGAIN : rc;
1943 }
1944
1945 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1946 unsigned long deadline)
1947 {
1948 struct ata_port *ap = link->ap;
1949 struct ahci_port_priv *pp = ap->private_data;
1950 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1951 struct ata_taskfile tf;
1952 bool online;
1953 int rc;
1954
1955 ahci_stop_engine(ap);
1956
1957 /* clear D2H reception area to properly wait for D2H FIS */
1958 ata_tf_init(link->device, &tf);
1959 tf.command = 0x80;
1960 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1961
1962 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1963 deadline, &online, NULL);
1964
1965 ahci_start_engine(ap);
1966
1967 /* The pseudo configuration device on SIMG4726 attached to
1968 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1969 * hardreset if no device is attached to the first downstream
1970 * port && the pseudo device locks up on SRST w/ PMP==0. To
1971 * work around this, wait for !BSY only briefly. If BSY isn't
1972 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1973 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1974 *
1975 * Wait for two seconds. Devices attached to downstream port
1976 * which can't process the following IDENTIFY after this will
1977 * have to be reset again. For most cases, this should
1978 * suffice while making probing snappish enough.
1979 */
1980 if (online) {
1981 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1982 ahci_check_ready);
1983 if (rc)
1984 ahci_kick_engine(ap);
1985 }
1986 return rc;
1987 }
1988
1989 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1990 {
1991 struct ata_port *ap = link->ap;
1992 void __iomem *port_mmio = ahci_port_base(ap);
1993 u32 new_tmp, tmp;
1994
1995 ata_std_postreset(link, class);
1996
1997 /* Make sure port's ATAPI bit is set appropriately */
1998 new_tmp = tmp = readl(port_mmio + PORT_CMD);
1999 if (*class == ATA_DEV_ATAPI)
2000 new_tmp |= PORT_CMD_ATAPI;
2001 else
2002 new_tmp &= ~PORT_CMD_ATAPI;
2003 if (new_tmp != tmp) {
2004 writel(new_tmp, port_mmio + PORT_CMD);
2005 readl(port_mmio + PORT_CMD); /* flush */
2006 }
2007 }
2008
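/* Translate the qc's DMA scatterlist into the PRD table that follows
 * the command FIS header in the command table.  Each entry carries
 * the buffer address split into two 32-bit words and the zero-based
 * byte count expected by the AHCI PRD format.  Returns the number of
 * entries filled.
 */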
2009 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
2010 {
2011 struct scatterlist *sg;
2012 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
2013 unsigned int si;
2014
2015 VPRINTK("ENTER\n");
2016
2017 /*
2018 * Next, the S/G list.
2019 */
2020 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2021 dma_addr_t addr = sg_dma_address(sg);
2022 u32 sg_len = sg_dma_len(sg);
2023
2024 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
2025 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
2026 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
2027 }
2028
2029 return si;
2030 }
2031
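/* Per-command setup: build the command FIS (and ATAPI CDB if needed)
 * in this tag's command table, fill in the scatter/gather entries,
 * then program the matching command list slot with the FIS length,
 * direction, ATAPI/prefetch flags, PMP port and PRDT entry count.
 */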
2032 static void ahci_qc_prep(struct ata_queued_cmd *qc)
2033 {
2034 struct ata_port *ap = qc->ap;
2035 struct ahci_port_priv *pp = ap->private_data;
2036 int is_atapi = ata_is_atapi(qc->tf.protocol);
2037 void *cmd_tbl;
2038 u32 opts;
2039 const u32 cmd_fis_len = 5; /* five dwords */
2040 unsigned int n_elem;
2041
2042 /*
2043 * Fill in command table information. First, the header,
2044 * a SATA Register - Host to Device command FIS.
2045 */
2046 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2047
2048 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
2049 if (is_atapi) {
2050 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2051 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2052 }
2053
2054 n_elem = 0;
2055 if (qc->flags & ATA_QCFLAG_DMAMAP)
2056 n_elem = ahci_fill_sg(qc, cmd_tbl);
2057
2058 /*
2059 * Fill in command slot information.
2060 */
2061 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
2062 if (qc->tf.flags & ATA_TFLAG_WRITE)
2063 opts |= AHCI_CMD_WRITE;
2064 if (is_atapi)
2065 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2066
2067 ahci_fill_cmd_slot(pp, qc->tag, opts);
2068 }
2069
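/* Handle error bits from PORT_IRQ_STAT: record and clear SError,
 * charge taskfile errors to the active qc or link, note unknown FIS,
 * bad PMP, host bus and interface errors, flag hotplug events, and
 * finally hand the port to EH by freezing or aborting it.
 */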
2070 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
2071 {
2072 struct ahci_host_priv *hpriv = ap->host->private_data;
2073 struct ahci_port_priv *pp = ap->private_data;
2074 struct ata_eh_info *host_ehi = &ap->link.eh_info;
2075 struct ata_link *link = NULL;
2076 struct ata_queued_cmd *active_qc;
2077 struct ata_eh_info *active_ehi;
2078 u32 serror;
2079
2080 /* determine active link */
2081 ata_for_each_link(link, ap, EDGE)
2082 if (ata_link_active(link))
2083 break;
2084 if (!link)
2085 link = &ap->link;
2086
2087 active_qc = ata_qc_from_tag(ap, link->active_tag);
2088 active_ehi = &link->eh_info;
2089
2090 /* record irq stat */
2091 ata_ehi_clear_desc(host_ehi);
2092 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
2093
2094 /* AHCI needs SError cleared; otherwise, it might lock up */
2095 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
2096 ahci_scr_write(&ap->link, SCR_ERROR, serror);
2097 host_ehi->serror |= serror;
2098
2099 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
2100 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
2101 irq_stat &= ~PORT_IRQ_IF_ERR;
2102
2103 if (irq_stat & PORT_IRQ_TF_ERR) {
2104 /* If qc is active, charge it; otherwise, the active
2105 * link. There's no active qc on NCQ errors. It will
2106 * be determined by EH by reading log page 10h.
2107 */
2108 if (active_qc)
2109 active_qc->err_mask |= AC_ERR_DEV;
2110 else
2111 active_ehi->err_mask |= AC_ERR_DEV;
2112
2113 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2114 host_ehi->serror &= ~SERR_INTERNAL;
2115 }
2116
2117 if (irq_stat & PORT_IRQ_UNK_FIS) {
2118 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2119
2120 active_ehi->err_mask |= AC_ERR_HSM;
2121 active_ehi->action |= ATA_EH_RESET;
2122 ata_ehi_push_desc(active_ehi,
2123 "unknown FIS %08x %08x %08x %08x" ,
2124 unk[0], unk[1], unk[2], unk[3]);
2125 }
2126
2127 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2128 active_ehi->err_mask |= AC_ERR_HSM;
2129 active_ehi->action |= ATA_EH_RESET;
2130 ata_ehi_push_desc(active_ehi, "incorrect PMP");
2131 }
2132
2133 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2134 host_ehi->err_mask |= AC_ERR_HOST_BUS;
2135 host_ehi->action |= ATA_EH_RESET;
2136 ata_ehi_push_desc(host_ehi, "host bus error");
2137 }
2138
2139 if (irq_stat & PORT_IRQ_IF_ERR) {
2140 host_ehi->err_mask |= AC_ERR_ATA_BUS;
2141 host_ehi->action |= ATA_EH_RESET;
2142 ata_ehi_push_desc(host_ehi, "interface fatal error");
2143 }
2144
2145 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2146 ata_ehi_hotplugged(host_ehi);
2147 ata_ehi_push_desc(host_ehi, "%s",
2148 irq_stat & PORT_IRQ_CONNECT ?
2149 "connection status changed" : "PHY RDY changed");
2150 }
2151
2152 /* okay, let's hand over to EH */
2153
2154 if (irq_stat & PORT_IRQ_FREEZE)
2155 ata_port_freeze(ap);
2156 else
2157 ata_port_abort(ap);
2158 }
2159
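/* Per-port interrupt handler: read and ack PORT_IRQ_STAT, divert
 * error conditions to ahci_error_intr(), handle SDB-FIS based async
 * notification, then complete finished commands using PORT_SCR_ACT
 * (NCQ phase) or PORT_CMD_ISSUE as the outstanding-tag mask.
 */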
2160 static void ahci_port_intr(struct ata_port *ap)
2161 {
2162 void __iomem *port_mmio = ahci_port_base(ap);
2163 struct ata_eh_info *ehi = &ap->link.eh_info;
2164 struct ahci_port_priv *pp = ap->private_data;
2165 struct ahci_host_priv *hpriv = ap->host->private_data;
2166 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2167 u32 status, qc_active;
2168 int rc;
2169
2170 status = readl(port_mmio + PORT_IRQ_STAT);
2171 writel(status, port_mmio + PORT_IRQ_STAT);
2172
2173 /* ignore BAD_PMP while resetting */
2174 if (unlikely(resetting))
2175 status &= ~PORT_IRQ_BAD_PMP;
2176
2177 /* If we are getting PhyRdy, this is
2178 	 * just a power state change; clear it,
2179 	 * plus the PhyRdy/Comm Wake bits, from
2180 	 * SError.
2181 */
2182 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2183 (status & PORT_IRQ_PHYRDY)) {
2184 status &= ~PORT_IRQ_PHYRDY;
2185 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2186 }
2187
2188 if (unlikely(status & PORT_IRQ_ERROR)) {
2189 ahci_error_intr(ap, status);
2190 return;
2191 }
2192
2193 if (status & PORT_IRQ_SDB_FIS) {
2194 /* If SNotification is available, leave notification
2195 * handling to sata_async_notification(). If not,
2196 * emulate it by snooping SDB FIS RX area.
2197 *
2198 		 * Snooping the FIS RX area is probably cheaper than
2199 		 * poking SNotification, but some controllers which
2200 		 * implement SNotification, ICH9 for example, don't
2201 		 * store the AN SDB FIS into the receive area.
2202 */
2203 if (hpriv->cap & HOST_CAP_SNTF)
2204 sata_async_notification(ap);
2205 else {
2206 /* If the 'N' bit in word 0 of the FIS is set,
2207 * we just received asynchronous notification.
2208 * Tell libata about it.
2209 */
2210 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2211 u32 f0 = le32_to_cpu(f[0]);
2212
2213 if (f0 & (1 << 15))
2214 sata_async_notification(ap);
2215 }
2216 }
2217
2218 /* pp->active_link is valid iff any command is in flight */
2219 if (ap->qc_active && pp->active_link->sactive)
2220 qc_active = readl(port_mmio + PORT_SCR_ACT);
2221 else
2222 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2223
2224 rc = ata_qc_complete_multiple(ap, qc_active);
2225
2226 /* while resetting, invalid completions are expected */
2227 if (unlikely(rc < 0 && !resetting)) {
2228 ehi->err_mask |= AC_ERR_HSM;
2229 ehi->action |= ATA_EH_RESET;
2230 ata_port_freeze(ap);
2231 }
2232 }
2233
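/* Top-level interrupt handler: read HOST_IRQ_STAT, dispatch each
 * pending port to ahci_port_intr() under the host lock, then clear
 * HOST_IRQ_STAT last since it latches the port-level events.
 */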
2234 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2235 {
2236 struct ata_host *host = dev_instance;
2237 struct ahci_host_priv *hpriv;
2238 unsigned int i, handled = 0;
2239 void __iomem *mmio;
2240 u32 irq_stat, irq_masked;
2241
2242 VPRINTK("ENTER\n");
2243
2244 hpriv = host->private_data;
2245 mmio = host->iomap[AHCI_PCI_BAR];
2246
2247 /* sigh. 0xffffffff is a valid return from h/w */
2248 irq_stat = readl(mmio + HOST_IRQ_STAT);
2249 if (!irq_stat)
2250 return IRQ_NONE;
2251
2252 irq_masked = irq_stat & hpriv->port_map;
2253
2254 spin_lock(&host->lock);
2255
2256 for (i = 0; i < host->n_ports; i++) {
2257 struct ata_port *ap;
2258
2259 if (!(irq_masked & (1 << i)))
2260 continue;
2261
2262 ap = host->ports[i];
2263 if (ap) {
2264 ahci_port_intr(ap);
2265 VPRINTK("port %u\n", i);
2266 } else {
2267 VPRINTK("port %u (no irq)\n", i);
2268 if (ata_ratelimit())
2269 dev_printk(KERN_WARNING, host->dev,
2270 "interrupt on disabled port %u\n", i);
2271 }
2272
2273 handled = 1;
2274 }
2275
2276 	/* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
2277 * it should be cleared after all the port events are cleared;
2278 * otherwise, it will raise a spurious interrupt after each
2279 * valid one. Please read section 10.6.2 of ahci 1.1 for more
2280 * information.
2281 *
2282 * Also, use the unmasked value to clear interrupt as spurious
2283 * pending event on a dummy port might cause screaming IRQ.
2284 */
2285 writel(irq_stat, mmio + HOST_IRQ_STAT);
2286
2287 spin_unlock(&host->lock);
2288
2289 VPRINTK("EXIT\n");
2290
2291 return IRQ_RETVAL(handled);
2292 }
2293
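/* Issue a prepared command: remember the active link for the
 * completion path, set the tag's bit in PORT_SCR_ACT first for NCQ
 * commands, write it to PORT_CMD_ISSUE and update the software
 * activity LED state.
 */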
2294 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2295 {
2296 struct ata_port *ap = qc->ap;
2297 void __iomem *port_mmio = ahci_port_base(ap);
2298 struct ahci_port_priv *pp = ap->private_data;
2299
2300 /* Keep track of the currently active link. It will be used
2301 * in completion path to determine whether NCQ phase is in
2302 * progress.
2303 */
2304 pp->active_link = qc->dev->link;
2305
2306 if (qc->tf.protocol == ATA_PROT_NCQ)
2307 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2308 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2309
2310 ahci_sw_activity(qc->dev->link);
2311
2312 return 0;
2313 }
2314
2315 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2316 {
2317 struct ahci_port_priv *pp = qc->ap->private_data;
2318 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2319
2320 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2321 return true;
2322 }
2323
2324 static void ahci_freeze(struct ata_port *ap)
2325 {
2326 void __iomem *port_mmio = ahci_port_base(ap);
2327
2328 /* turn IRQ off */
2329 writel(0, port_mmio + PORT_IRQ_MASK);
2330 }
2331
2332 static void ahci_thaw(struct ata_port *ap)
2333 {
2334 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2335 void __iomem *port_mmio = ahci_port_base(ap);
2336 u32 tmp;
2337 struct ahci_port_priv *pp = ap->private_data;
2338
2339 /* clear IRQ */
2340 tmp = readl(port_mmio + PORT_IRQ_STAT);
2341 writel(tmp, port_mmio + PORT_IRQ_STAT);
2342 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2343
2344 /* turn IRQ back on */
2345 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2346 }
2347
2348 static void ahci_error_handler(struct ata_port *ap)
2349 {
2350 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2351 /* restart engine */
2352 ahci_stop_engine(ap);
2353 ahci_start_engine(ap);
2354 }
2355
2356 sata_pmp_error_handler(ap);
2357 }
2358
2359 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2360 {
2361 struct ata_port *ap = qc->ap;
2362
2363 /* make DMA engine forget about the failed command */
2364 if (qc->flags & ATA_QCFLAG_FAILED)
2365 ahci_kick_engine(ap);
2366 }
2367
2368 static void ahci_pmp_attach(struct ata_port *ap)
2369 {
2370 void __iomem *port_mmio = ahci_port_base(ap);
2371 struct ahci_port_priv *pp = ap->private_data;
2372 u32 cmd;
2373
2374 cmd = readl(port_mmio + PORT_CMD);
2375 cmd |= PORT_CMD_PMP;
2376 writel(cmd, port_mmio + PORT_CMD);
2377
2378 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2379 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2380 }
2381
2382 static void ahci_pmp_detach(struct ata_port *ap)
2383 {
2384 void __iomem *port_mmio = ahci_port_base(ap);
2385 struct ahci_port_priv *pp = ap->private_data;
2386 u32 cmd;
2387
2388 cmd = readl(port_mmio + PORT_CMD);
2389 cmd &= ~PORT_CMD_PMP;
2390 writel(cmd, port_mmio + PORT_CMD);
2391
2392 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2393 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2394 }
2395
2396 static int ahci_port_resume(struct ata_port *ap)
2397 {
2398 ahci_power_up(ap);
2399 ahci_start_port(ap);
2400
2401 if (sata_pmp_attached(ap))
2402 ahci_pmp_attach(ap);
2403 else
2404 ahci_pmp_detach(ap);
2405
2406 return 0;
2407 }
2408
2409 #ifdef CONFIG_PM
2410 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2411 {
2412 const char *emsg = NULL;
2413 int rc;
2414
2415 rc = ahci_deinit_port(ap, &emsg);
2416 if (rc == 0)
2417 ahci_power_down(ap);
2418 else {
2419 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2420 ahci_start_port(ap);
2421 }
2422
2423 return rc;
2424 }
2425
2426 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2427 {
2428 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2429 struct ahci_host_priv *hpriv = host->private_data;
2430 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2431 u32 ctl;
2432
2433 if (mesg.event & PM_EVENT_SUSPEND &&
2434 hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
2435 dev_printk(KERN_ERR, &pdev->dev,
2436 "BIOS update required for suspend/resume\n");
2437 return -EIO;
2438 }
2439
2440 if (mesg.event & PM_EVENT_SLEEP) {
2441 /* AHCI spec rev1.1 section 8.3.3:
2442 * Software must disable interrupts prior to requesting a
2443 * transition of the HBA to D3 state.
2444 */
2445 ctl = readl(mmio + HOST_CTL);
2446 ctl &= ~HOST_IRQ_EN;
2447 writel(ctl, mmio + HOST_CTL);
2448 readl(mmio + HOST_CTL); /* flush */
2449 }
2450
2451 return ata_pci_device_suspend(pdev, mesg);
2452 }
2453
2454 static int ahci_pci_device_resume(struct pci_dev *pdev)
2455 {
2456 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2457 int rc;
2458
2459 rc = ata_pci_device_do_resume(pdev);
2460 if (rc)
2461 return rc;
2462
2463 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2464 rc = ahci_reset_controller(host);
2465 if (rc)
2466 return rc;
2467
2468 ahci_init_controller(host);
2469 }
2470
2471 ata_host_resume(host);
2472
2473 return 0;
2474 }
2475 #endif
2476
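/* Allocate per-port state plus the single DMA-coherent block holding
 * the command list, the received-FIS area and the command table
 * area, record the default interrupt mask, then bring the port up
 * via ahci_port_resume().
 */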
2477 static int ahci_port_start(struct ata_port *ap)
2478 {
2479 struct device *dev = ap->host->dev;
2480 struct ahci_port_priv *pp;
2481 void *mem;
2482 dma_addr_t mem_dma;
2483
2484 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2485 if (!pp)
2486 return -ENOMEM;
2487
2488 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2489 GFP_KERNEL);
2490 if (!mem)
2491 return -ENOMEM;
2492 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2493
2494 /*
2495 * First item in chunk of DMA memory: 32-slot command table,
2496 * 32 bytes each in size
2497 */
2498 pp->cmd_slot = mem;
2499 pp->cmd_slot_dma = mem_dma;
2500
2501 mem += AHCI_CMD_SLOT_SZ;
2502 mem_dma += AHCI_CMD_SLOT_SZ;
2503
2504 /*
2505 * Second item: Received-FIS area
2506 */
2507 pp->rx_fis = mem;
2508 pp->rx_fis_dma = mem_dma;
2509
2510 mem += AHCI_RX_FIS_SZ;
2511 mem_dma += AHCI_RX_FIS_SZ;
2512
2513 /*
2514 	 * Third item: the command table area, one table per command
2515 	 * slot, each holding a command FIS and its scatter-gather table
2516 */
2517 pp->cmd_tbl = mem;
2518 pp->cmd_tbl_dma = mem_dma;
2519
2520 /*
2521 * Save off initial list of interrupts to be enabled.
2522 	 * Save off the initial list of interrupts to be enabled.
2523 	 * This could be changed later.
2524 pp->intr_mask = DEF_PORT_IRQ;
2525
2526 ap->private_data = pp;
2527
2528 /* engage engines, captain */
2529 return ahci_port_resume(ap);
2530 }
2531
2532 static void ahci_port_stop(struct ata_port *ap)
2533 {
2534 const char *emsg = NULL;
2535 int rc;
2536
2537 /* de-initialize port */
2538 rc = ahci_deinit_port(ap, &emsg);
2539 if (rc)
2540 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2541 }
2542
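/* Configure PCI DMA masks: when @using_dac is set and a 64-bit
 * streaming mask is accepted, try a 64-bit consistent mask and fall
 * back to 32-bit; otherwise force both masks to 32 bits.
 */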
2543 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2544 {
2545 int rc;
2546
2547 if (using_dac &&
2548 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2549 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2550 if (rc) {
2551 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2552 if (rc) {
2553 dev_printk(KERN_ERR, &pdev->dev,
2554 "64-bit DMA enable failed\n");
2555 return rc;
2556 }
2557 }
2558 } else {
2559 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2560 if (rc) {
2561 dev_printk(KERN_ERR, &pdev->dev,
2562 "32-bit DMA enable failed\n");
2563 return rc;
2564 }
2565 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2566 if (rc) {
2567 dev_printk(KERN_ERR, &pdev->dev,
2568 "32-bit consistent DMA enable failed\n");
2569 return rc;
2570 }
2571 }
2572 return 0;
2573 }
2574
2575 static void ahci_print_info(struct ata_host *host)
2576 {
2577 struct ahci_host_priv *hpriv = host->private_data;
2578 struct pci_dev *pdev = to_pci_dev(host->dev);
2579 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2580 u32 vers, cap, cap2, impl, speed;
2581 const char *speed_s;
2582 u16 cc;
2583 const char *scc_s;
2584
2585 vers = readl(mmio + HOST_VERSION);
2586 cap = hpriv->cap;
2587 cap2 = hpriv->cap2;
2588 impl = hpriv->port_map;
2589
2590 speed = (cap >> 20) & 0xf;
2591 if (speed == 1)
2592 speed_s = "1.5";
2593 else if (speed == 2)
2594 speed_s = "3";
2595 else if (speed == 3)
2596 speed_s = "6";
2597 else
2598 speed_s = "?";
2599
2600 pci_read_config_word(pdev, 0x0a, &cc);
2601 if (cc == PCI_CLASS_STORAGE_IDE)
2602 scc_s = "IDE";
2603 else if (cc == PCI_CLASS_STORAGE_SATA)
2604 scc_s = "SATA";
2605 else if (cc == PCI_CLASS_STORAGE_RAID)
2606 scc_s = "RAID";
2607 else
2608 scc_s = "unknown";
2609
2610 dev_printk(KERN_INFO, &pdev->dev,
2611 "AHCI %02x%02x.%02x%02x "
2612 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2613 ,
2614
2615 (vers >> 24) & 0xff,
2616 (vers >> 16) & 0xff,
2617 (vers >> 8) & 0xff,
2618 vers & 0xff,
2619
2620 ((cap >> 8) & 0x1f) + 1,
2621 (cap & 0x1f) + 1,
2622 speed_s,
2623 impl,
2624 scc_s);
2625
2626 dev_printk(KERN_INFO, &pdev->dev,
2627 "flags: "
2628 "%s%s%s%s%s%s%s"
2629 "%s%s%s%s%s%s%s"
2630 "%s%s%s%s%s%s\n"
2631 ,
2632
2633 cap & HOST_CAP_64 ? "64bit " : "",
2634 cap & HOST_CAP_NCQ ? "ncq " : "",
2635 cap & HOST_CAP_SNTF ? "sntf " : "",
2636 cap & HOST_CAP_MPS ? "ilck " : "",
2637 cap & HOST_CAP_SSS ? "stag " : "",
2638 cap & HOST_CAP_ALPM ? "pm " : "",
2639 cap & HOST_CAP_LED ? "led " : "",
2640 cap & HOST_CAP_CLO ? "clo " : "",
2641 cap & HOST_CAP_ONLY ? "only " : "",
2642 cap & HOST_CAP_PMP ? "pmp " : "",
2643 cap & HOST_CAP_FBS ? "fbs " : "",
2644 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2645 cap & HOST_CAP_SSC ? "slum " : "",
2646 cap & HOST_CAP_PART ? "part " : "",
2647 cap & HOST_CAP_CCC ? "ccc " : "",
2648 cap & HOST_CAP_EMS ? "ems " : "",
2649 cap & HOST_CAP_SXS ? "sxs " : "",
2650 cap2 & HOST_CAP2_APST ? "apst " : "",
2651 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2652 cap2 & HOST_CAP2_BOH ? "boh " : ""
2653 );
2654 }
2655
2656 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2657 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2658 * support PMP and the 4726 either directly exports the device
2659 * attached to the first downstream port or acts as a hardware storage
2660  * controller and emulates a single ATA device (can be RAID 0/1 or some
2661 * other configuration).
2662 *
2663 * When there's no device attached to the first downstream port of the
2664 * 4726, "Config Disk" appears, which is a pseudo ATA device to
2665 * configure the 4726. However, ATA emulation of the device is very
2666 * lame. It doesn't send signature D2H Reg FIS after the initial
2667  * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2668 *
2669 * The following function works around the problem by always using
2670 * hardreset on the port and not depending on receiving signature FIS
2671 * afterward. If signature FIS isn't received soon, ATA class is
2672 * assumed without follow-up softreset.
2673 */
2674 static void ahci_p5wdh_workaround(struct ata_host *host)
2675 {
2676 static struct dmi_system_id sysids[] = {
2677 {
2678 .ident = "P5W DH Deluxe",
2679 .matches = {
2680 DMI_MATCH(DMI_SYS_VENDOR,
2681 "ASUSTEK COMPUTER INC"),
2682 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2683 },
2684 },
2685 { }
2686 };
2687 struct pci_dev *pdev = to_pci_dev(host->dev);
2688
2689 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2690 dmi_check_system(sysids)) {
2691 struct ata_port *ap = host->ports[1];
2692
2693 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2694 "Deluxe on-board SIMG4726 workaround\n");
2695
2696 ap->ops = &ahci_p5wdh_ops;
2697 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2698 }
2699 }
2700
2701 /* only some SB600 ahci controllers can do 64bit DMA */
2702 static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
2703 {
2704 static const struct dmi_system_id sysids[] = {
2705 /*
2706 		 * The oldest version known to be broken is 0901; the oldest
2707 		 * known to work is 1501, released on 2007-10-26.
2708 * Enable 64bit DMA on 1501 and anything newer.
2709 *
2710 * Please read bko#9412 for more info.
2711 */
2712 {
2713 .ident = "ASUS M2A-VM",
2714 .matches = {
2715 DMI_MATCH(DMI_BOARD_VENDOR,
2716 "ASUSTeK Computer INC."),
2717 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2718 },
2719 .driver_data = "20071026", /* yyyymmdd */
2720 },
2721 /*
2722 * All BIOS versions for the MSI K9A2 Platinum (MS-7376)
2723 * support 64bit DMA.
2724 *
2725 * BIOS versions earlier than 1.5 had the Manufacturer DMI
2726 * fields as "MICRO-STAR INTERANTIONAL CO.,LTD".
2727 * This spelling mistake was fixed in BIOS version 1.5, so
2728 * 1.5 and later have the Manufacturer as
2729 * "MICRO-STAR INTERNATIONAL CO.,LTD".
2730 * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER".
2731 *
2732 * BIOS versions earlier than 1.9 had a Board Product Name
2733 * DMI field of "MS-7376". This was changed to be
2734 * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still
2735 * match on DMI_BOARD_NAME of "MS-7376".
2736 */
2737 {
2738 .ident = "MSI K9A2 Platinum",
2739 .matches = {
2740 DMI_MATCH(DMI_BOARD_VENDOR,
2741 "MICRO-STAR INTER"),
2742 DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
2743 },
2744 },
2745 { }
2746 };
2747 const struct dmi_system_id *match;
2748 int year, month, date;
2749 char buf[9];
2750
2751 match = dmi_first_match(sysids);
2752 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
2753 !match)
2754 return false;
2755
2756 if (!match->driver_data)
2757 goto enable_64bit;
2758
2759 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2760 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2761
2762 if (strcmp(buf, match->driver_data) >= 0)
2763 goto enable_64bit;
2764 else {
2765 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
2766 "forcing 32bit DMA, update BIOS\n", match->ident);
2767 return false;
2768 }
2769
2770 enable_64bit:
2771 dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
2772 match->ident);
2773 return true;
2774 }
2775
2776 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
2777 {
2778 static const struct dmi_system_id broken_systems[] = {
2779 {
2780 .ident = "HP Compaq nx6310",
2781 .matches = {
2782 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2783 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
2784 },
2785 /* PCI slot number of the controller */
2786 .driver_data = (void *)0x1FUL,
2787 },
2788 {
2789 .ident = "HP Compaq 6720s",
2790 .matches = {
2791 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2792 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
2793 },
2794 /* PCI slot number of the controller */
2795 .driver_data = (void *)0x1FUL,
2796 },
2797
2798 { } /* terminate list */
2799 };
2800 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
2801
2802 if (dmi) {
2803 unsigned long slot = (unsigned long)dmi->driver_data;
2804 /* apply the quirk only to on-board controllers */
2805 return slot == PCI_SLOT(pdev->devfn);
2806 }
2807
2808 return false;
2809 }
2810
2811 static bool ahci_broken_suspend(struct pci_dev *pdev)
2812 {
2813 static const struct dmi_system_id sysids[] = {
2814 /*
2815 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
2816 * to the harddisk doesn't become online after
2817 * resuming from STR. Warn and fail suspend.
2818 */
2819 {
2820 .ident = "dv4",
2821 .matches = {
2822 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2823 DMI_MATCH(DMI_PRODUCT_NAME,
2824 "HP Pavilion dv4 Notebook PC"),
2825 },
2826 .driver_data = "F.30", /* cutoff BIOS version */
2827 },
2828 {
2829 .ident = "dv5",
2830 .matches = {
2831 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2832 DMI_MATCH(DMI_PRODUCT_NAME,
2833 "HP Pavilion dv5 Notebook PC"),
2834 },
2835 .driver_data = "F.16", /* cutoff BIOS version */
2836 },
2837 {
2838 .ident = "dv6",
2839 .matches = {
2840 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2841 DMI_MATCH(DMI_PRODUCT_NAME,
2842 "HP Pavilion dv6 Notebook PC"),
2843 },
2844 .driver_data = "F.21", /* cutoff BIOS version */
2845 },
2846 {
2847 .ident = "HDX18",
2848 .matches = {
2849 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2850 DMI_MATCH(DMI_PRODUCT_NAME,
2851 "HP HDX18 Notebook PC"),
2852 },
2853 .driver_data = "F.23", /* cutoff BIOS version */
2854 },
2855 { } /* terminate list */
2856 };
2857 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2858 const char *ver;
2859
2860 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
2861 return false;
2862
2863 ver = dmi_get_system_info(DMI_BIOS_VERSION);
2864
2865 return !ver || strcmp(ver, dmi->driver_data) < 0;
2866 }
2867
2868 static bool ahci_broken_online(struct pci_dev *pdev)
2869 {
2870 #define ENCODE_BUSDEVFN(bus, slot, func) \
2871 (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
2872 static const struct dmi_system_id sysids[] = {
2873 /*
2874 		 * There are several Gigabyte boards which use
2875 		 * SIMG5723s configured as hardware RAID.  Certain
2876 		 * 5723 firmware revisions shipped there keep the link
2877 		 * online but fail to answer properly to SRST or
2878 		 * IDENTIFY when no device is attached downstream,
2879 		 * causing libata to retry quite a few times and
2880 		 * leading to excessive detection delay.
2881 *
2882 * As these firmwares respond to the second reset try
2883 * with invalid device signature, considering unknown
2884 * sig as offline works around the problem acceptably.
2885 */
2886 {
2887 .ident = "EP45-DQ6",
2888 .matches = {
2889 DMI_MATCH(DMI_BOARD_VENDOR,
2890 "Gigabyte Technology Co., Ltd."),
2891 DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
2892 },
2893 .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
2894 },
2895 {
2896 .ident = "EP45-DS5",
2897 .matches = {
2898 DMI_MATCH(DMI_BOARD_VENDOR,
2899 "Gigabyte Technology Co., Ltd."),
2900 DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
2901 },
2902 .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
2903 },
2904 { } /* terminate list */
2905 };
2906 #undef ENCODE_BUSDEVFN
2907 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2908 unsigned int val;
2909
2910 if (!dmi)
2911 return false;
2912
2913 val = (unsigned long)dmi->driver_data;
2914
2915 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
2916 }
2917
2918 #ifdef CONFIG_ATA_ACPI
2919 static void ahci_gtf_filter_workaround(struct ata_host *host)
2920 {
2921 static const struct dmi_system_id sysids[] = {
2922 /*
2923 * Aspire 3810T issues a bunch of SATA enable commands
2924 * via _GTF including an invalid one and one which is
2925 * rejected by the device. Among the successful ones
2926 * is FPDMA non-zero offset enable which when enabled
2927 * only on the drive side leads to NCQ command
2928 * failures. Filter it out.
2929 */
2930 {
2931 .ident = "Aspire 3810T",
2932 .matches = {
2933 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2934 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
2935 },
2936 .driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
2937 },
2938 { }
2939 };
2940 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2941 unsigned int filter;
2942 int i;
2943
2944 if (!dmi)
2945 return;
2946
2947 filter = (unsigned long)dmi->driver_data;
2948 dev_printk(KERN_INFO, host->dev,
2949 "applying extra ACPI _GTF filter 0x%x for %s\n",
2950 filter, dmi->ident);
2951
2952 for (i = 0; i < host->n_ports; i++) {
2953 struct ata_port *ap = host->ports[i];
2954 struct ata_link *link;
2955 struct ata_device *dev;
2956
2957 ata_for_each_link(link, ap, EDGE)
2958 ata_for_each_dev(dev, link, ALL)
2959 dev->gtf_filter |= filter;
2960 }
2961 }
2962 #else
2963 static inline void ahci_gtf_filter_workaround(struct ata_host *host)
2964 {}
2965 #endif
2966
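/* PCI probe: acquire BARs, apply per-chipset quirks and DMI-based
 * workarounds, save the initial controller configuration, allocate
 * the ATA host, configure DMA masks, reset and initialize the
 * controller, print its capabilities and activate the host with the
 * shared interrupt handler.
 */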
2967 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2968 {
2969 static int printed_version;
2970 unsigned int board_id = ent->driver_data;
2971 struct ata_port_info pi = ahci_port_info[board_id];
2972 const struct ata_port_info *ppi[] = { &pi, NULL };
2973 struct device *dev = &pdev->dev;
2974 struct ahci_host_priv *hpriv;
2975 struct ata_host *host;
2976 int n_ports, i, rc;
2977
2978 VPRINTK("ENTER\n");
2979
2980 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2981
2982 if (!printed_version++)
2983 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2984
2985 	/* The AHCI driver can only drive the SATA ports; the PATA driver
2986 	   can drive them all, so if both drivers are selected make sure
2987 AHCI stays out of the way */
2988 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
2989 return -ENODEV;
2990
2991 /* acquire resources */
2992 rc = pcim_enable_device(pdev);
2993 if (rc)
2994 return rc;
2995
2996 /* AHCI controllers often implement SFF compatible interface.
2997 * Grab all PCI BARs just in case.
2998 */
2999 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
3000 if (rc == -EBUSY)
3001 pcim_pin_device(pdev);
3002 if (rc)
3003 return rc;
3004
3005 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3006 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
3007 u8 map;
3008
3009 /* ICH6s share the same PCI ID for both piix and ahci
3010 * modes. Enabling ahci mode while MAP indicates
3011 * combined mode is a bad idea. Yield to ata_piix.
3012 */
3013 pci_read_config_byte(pdev, ICH_MAP, &map);
3014 if (map & 0x3) {
3015 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
3016 "combined mode, can't enable AHCI mode\n");
3017 return -ENODEV;
3018 }
3019 }
3020
3021 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
3022 if (!hpriv)
3023 return -ENOMEM;
3024 hpriv->flags |= (unsigned long)pi.private_data;
3025
3026 /* MCP65 revision A1 and A2 can't do MSI */
3027 if (board_id == board_ahci_mcp65 &&
3028 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
3029 hpriv->flags |= AHCI_HFLAG_NO_MSI;
3030
3031 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
3032 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
3033 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
3034
3035 /* only some SB600s can do 64bit DMA */
3036 if (ahci_sb600_enable_64bit(pdev))
3037 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
3038
3039 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
3040 pci_intx(pdev, 1);
3041
3042 /* save initial config */
3043 ahci_save_initial_config(pdev, hpriv);
3044
3045 /* prepare host */
3046 if (hpriv->cap & HOST_CAP_NCQ)
3047 pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
3048
3049 if (hpriv->cap & HOST_CAP_PMP)
3050 pi.flags |= ATA_FLAG_PMP;
3051
3052 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
3053 u8 messages;
3054 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
3055 u32 em_loc = readl(mmio + HOST_EM_LOC);
3056 u32 em_ctl = readl(mmio + HOST_EM_CTL);
3057
3058 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
3059
3060 /* we only support LED message type right now */
3061 if ((messages & 0x01) && (ahci_em_messages == 1)) {
3062 /* store em_loc */
3063 hpriv->em_loc = ((em_loc >> 16) * 4);
3064 pi.flags |= ATA_FLAG_EM;
3065 if (!(em_ctl & EM_CTL_ALHD))
3066 pi.flags |= ATA_FLAG_SW_ACTIVITY;
3067 }
3068 }
3069
3070 if (ahci_broken_system_poweroff(pdev)) {
3071 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
3072 dev_info(&pdev->dev,
3073 "quirky BIOS, skipping spindown on poweroff\n");
3074 }
3075
3076 if (ahci_broken_suspend(pdev)) {
3077 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
3078 dev_printk(KERN_WARNING, &pdev->dev,
3079 "BIOS update required for suspend/resume\n");
3080 }
3081
3082 if (ahci_broken_online(pdev)) {
3083 hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
3084 dev_info(&pdev->dev,
3085 "online status unreliable, applying workaround\n");
3086 }
3087
3088 	/* CAP.NP sometimes indicates the index of the last enabled
3089 * port, at other times, that of the last possible port, so
3090 * determining the maximum port number requires looking at
3091 * both CAP.NP and port_map.
3092 */
3093 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
3094
3095 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3096 if (!host)
3097 return -ENOMEM;
3098 host->iomap = pcim_iomap_table(pdev);
3099 host->private_data = hpriv;
3100
3101 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
3102 host->flags |= ATA_HOST_PARALLEL_SCAN;
3103 else
3104 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
3105
3106 if (pi.flags & ATA_FLAG_EM)
3107 ahci_reset_em(host);
3108
3109 for (i = 0; i < host->n_ports; i++) {
3110 struct ata_port *ap = host->ports[i];
3111
3112 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
3113 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
3114 0x100 + ap->port_no * 0x80, "port");
3115
3116 /* set initial link pm policy */
3117 ap->pm_policy = NOT_AVAILABLE;
3118
3119 /* set enclosure management message type */
3120 if (ap->flags & ATA_FLAG_EM)
3121 ap->em_message_type = ahci_em_messages;
3122
3123
3124 /* disabled/not-implemented port */
3125 if (!(hpriv->port_map & (1 << i)))
3126 ap->ops = &ata_dummy_port_ops;
3127 }
3128
3129 /* apply workaround for ASUS P5W DH Deluxe mainboard */
3130 ahci_p5wdh_workaround(host);
3131
3132 /* apply gtf filter quirk */
3133 ahci_gtf_filter_workaround(host);
3134
3135 /* initialize adapter */
3136 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3137 if (rc)
3138 return rc;
3139
3140 rc = ahci_reset_controller(host);
3141 if (rc)
3142 return rc;
3143
3144 ahci_init_controller(host);
3145 ahci_print_info(host);
3146
3147 pci_set_master(pdev);
3148 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
3149 &ahci_sht);
3150 }
3151
3152 static int __init ahci_init(void)
3153 {
3154 return pci_register_driver(&ahci_pci_driver);
3155 }
3156
3157 static void __exit ahci_exit(void)
3158 {
3159 pci_unregister_driver(&ahci_pci_driver);
3160 }
3161
3162
3163 MODULE_AUTHOR("Jeff Garzik");
3164 MODULE_DESCRIPTION("AHCI SATA low-level driver");
3165 MODULE_LICENSE("GPL");
3166 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
3167 MODULE_VERSION(DRV_VERSION);
3168
3169 module_init(ahci_init);
3170 module_exit(ahci_exit);