drivers/ata/ahci.c
1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48
49 #define DRV_NAME "ahci"
50 #define DRV_VERSION "3.0"
51
52 /* Enclosure Management Control */
53 #define EM_CTRL_MSG_TYPE 0x000f0000
54
55 /* Enclosure Management LED Message Type */
56 #define EM_MSG_LED_HBA_PORT 0x0000000f
57 #define EM_MSG_LED_PMP_SLOT 0x0000ff00
58 #define EM_MSG_LED_VALUE 0xffff0000
59 #define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
60 #define EM_MSG_LED_VALUE_OFF 0xfff80000
61 #define EM_MSG_LED_VALUE_ON 0x00010000
62
63 static int ahci_skip_host_reset;
64 static int ahci_ignore_sss;
65
66 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
67 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
68
69 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
70 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
71
72 static int ahci_enable_alpm(struct ata_port *ap,
73 enum link_pm policy);
74 static void ahci_disable_alpm(struct ata_port *ap);
75 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
76 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
77 size_t size);
78 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
79 ssize_t size);
80
81 enum {
82 AHCI_PCI_BAR = 5,
83 AHCI_MAX_PORTS = 32,
84 AHCI_MAX_SG = 168, /* hardware max is 64K */
85 AHCI_DMA_BOUNDARY = 0xffffffff,
86 AHCI_MAX_CMDS = 32,
87 AHCI_CMD_SZ = 32,
88 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
89 AHCI_RX_FIS_SZ = 256,
90 AHCI_CMD_TBL_CDB = 0x40,
91 AHCI_CMD_TBL_HDR_SZ = 0x80,
92 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
93 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
94 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
95 AHCI_RX_FIS_SZ,
96 AHCI_IRQ_ON_SG = (1 << 31),
97 AHCI_CMD_ATAPI = (1 << 5),
98 AHCI_CMD_WRITE = (1 << 6),
99 AHCI_CMD_PREFETCH = (1 << 7),
100 AHCI_CMD_RESET = (1 << 8),
101 AHCI_CMD_CLR_BUSY = (1 << 10),
102
103 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
104 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
105 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
106
107 board_ahci = 0,
108 board_ahci_vt8251 = 1,
109 board_ahci_ign_iferr = 2,
110 board_ahci_sb600 = 3,
111 board_ahci_mv = 4,
112 board_ahci_sb700 = 5, /* for SB700 and SB800 */
113 board_ahci_mcp65 = 6,
114 board_ahci_nopmp = 7,
115 board_ahci_yesncq = 8,
116 board_ahci_nosntf = 9,
117
118 /* global controller registers */
119 HOST_CAP = 0x00, /* host capabilities */
120 HOST_CTL = 0x04, /* global host control */
121 HOST_IRQ_STAT = 0x08, /* interrupt status */
122 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
123 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
124 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
125 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
126 HOST_CAP2 = 0x24, /* host capabilities, extended */
127
128 /* HOST_CTL bits */
129 HOST_RESET = (1 << 0), /* reset controller; self-clear */
130 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
131 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
132
133 /* HOST_CAP bits */
134 HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
135 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
136 HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
137 HOST_CAP_PART = (1 << 13), /* Partial state capable */
138 HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
139 HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
140 HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
141 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
142 HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
143 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
144 HOST_CAP_LED = (1 << 25), /* Supports activity LED */
145 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
146 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
147 HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
148 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
149 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
150 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
151
152 /* HOST_CAP2 bits */
153 HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
154 HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
155 HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
156
157 /* registers for each SATA port */
158 PORT_LST_ADDR = 0x00, /* command list DMA addr */
159 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
160 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
161 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
162 PORT_IRQ_STAT = 0x10, /* interrupt status */
163 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
164 PORT_CMD = 0x18, /* port command */
165 PORT_TFDATA = 0x20, /* taskfile data */
166 PORT_SIG = 0x24, /* device TF signature */
167 PORT_CMD_ISSUE = 0x38, /* command issue */
168 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
169 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
170 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
171 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
172 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
173
174 /* PORT_IRQ_{STAT,MASK} bits */
175 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
176 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
177 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
178 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
179 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
180 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
181 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
182 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
183
184 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
185 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
186 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
187 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
188 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
189 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
190 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
191 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
192 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
193
194 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
195 PORT_IRQ_IF_ERR |
196 PORT_IRQ_CONNECT |
197 PORT_IRQ_PHYRDY |
198 PORT_IRQ_UNK_FIS |
199 PORT_IRQ_BAD_PMP,
200 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
201 PORT_IRQ_TF_ERR |
202 PORT_IRQ_HBUS_DATA_ERR,
203 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
204 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
205 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
206
207 /* PORT_CMD bits */
208 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
209 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
210 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
211 PORT_CMD_PMP = (1 << 17), /* PMP attached */
212 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
213 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
214 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
215 PORT_CMD_CLO = (1 << 3), /* Command list override */
216 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
217 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
218 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
219
220 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
221 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
222 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
223 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
224
225 /* hpriv->flags bits */
226 AHCI_HFLAG_NO_NCQ = (1 << 0),
227 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
228 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
229 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
230 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
231 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
232 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
233 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
234 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
235 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
236 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
237 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
238 link offline */
239 AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
240
241 /* ap->flags bits */
242
243 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
244 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
245 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
246 ATA_FLAG_IPM,
247
248 ICH_MAP = 0x90, /* ICH MAP register */
249
250 /* em constants */
251 EM_MAX_SLOTS = 8,
252 EM_MAX_RETRY = 5,
253
254 /* em_ctl bits */
255 EM_CTL_RST = (1 << 9), /* Reset */
256 EM_CTL_TM = (1 << 8), /* Transmit Message */
257 EM_CTL_ALHD = (1 << 26), /* Activity LED */
258 };
259
260 struct ahci_cmd_hdr {
261 __le32 opts;
262 __le32 status;
263 __le32 tbl_addr;
264 __le32 tbl_addr_hi;
265 __le32 reserved[4];
266 };
267
268 struct ahci_sg {
269 __le32 addr;
270 __le32 addr_hi;
271 __le32 reserved;
272 __le32 flags_size;
273 };
274
275 struct ahci_em_priv {
276 enum sw_activity blink_policy;
277 struct timer_list timer;
278 unsigned long saved_activity;
279 unsigned long activity;
280 unsigned long led_state;
281 };
282
283 struct ahci_host_priv {
284 unsigned int flags; /* AHCI_HFLAG_* */
285 u32 cap; /* cap to use */
286 u32 cap2; /* cap2 to use */
287 u32 port_map; /* port map to use */
288 u32 saved_cap; /* saved initial cap */
289 u32 saved_cap2; /* saved initial cap2 */
290 u32 saved_port_map; /* saved initial port_map */
291 u32 em_loc; /* enclosure management location */
292 };
293
294 struct ahci_port_priv {
295 struct ata_link *active_link;
296 struct ahci_cmd_hdr *cmd_slot;
297 dma_addr_t cmd_slot_dma;
298 void *cmd_tbl;
299 dma_addr_t cmd_tbl_dma;
300 void *rx_fis;
301 dma_addr_t rx_fis_dma;
302 /* for NCQ spurious interrupt analysis */
303 unsigned int ncq_saw_d2h:1;
304 unsigned int ncq_saw_dmas:1;
305 unsigned int ncq_saw_sdb:1;
306 u32 intr_mask; /* interrupts to enable */
307 /* enclosure management info per PM slot */
308 struct ahci_em_priv em_priv[EM_MAX_SLOTS];
309 };
310
311 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
312 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
313 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
314 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
315 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
316 static int ahci_port_start(struct ata_port *ap);
317 static void ahci_port_stop(struct ata_port *ap);
318 static void ahci_qc_prep(struct ata_queued_cmd *qc);
319 static void ahci_freeze(struct ata_port *ap);
320 static void ahci_thaw(struct ata_port *ap);
321 static void ahci_pmp_attach(struct ata_port *ap);
322 static void ahci_pmp_detach(struct ata_port *ap);
323 static int ahci_softreset(struct ata_link *link, unsigned int *class,
324 unsigned long deadline);
325 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
326 unsigned long deadline);
327 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
328 unsigned long deadline);
329 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
330 unsigned long deadline);
331 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
332 unsigned long deadline);
333 static void ahci_postreset(struct ata_link *link, unsigned int *class);
334 static void ahci_error_handler(struct ata_port *ap);
335 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
336 static int ahci_port_resume(struct ata_port *ap);
337 static void ahci_dev_config(struct ata_device *dev);
338 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
339 u32 opts);
340 #ifdef CONFIG_PM
341 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
342 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
343 static int ahci_pci_device_resume(struct pci_dev *pdev);
344 #endif
345 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
346 static ssize_t ahci_activity_store(struct ata_device *dev,
347 enum sw_activity val);
348 static void ahci_init_sw_activity(struct ata_link *link);
349
350 static ssize_t ahci_show_host_caps(struct device *dev,
351 struct device_attribute *attr, char *buf);
352 static ssize_t ahci_show_host_cap2(struct device *dev,
353 struct device_attribute *attr, char *buf);
354 static ssize_t ahci_show_host_version(struct device *dev,
355 struct device_attribute *attr, char *buf);
356 static ssize_t ahci_show_port_cmd(struct device *dev,
357 struct device_attribute *attr, char *buf);
358
359 DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
360 DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
361 DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
362 DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
363
364 static struct device_attribute *ahci_shost_attrs[] = {
365 &dev_attr_link_power_management_policy,
366 &dev_attr_em_message_type,
367 &dev_attr_em_message,
368 &dev_attr_ahci_host_caps,
369 &dev_attr_ahci_host_cap2,
370 &dev_attr_ahci_host_version,
371 &dev_attr_ahci_port_cmd,
372 NULL
373 };
374
375 static struct device_attribute *ahci_sdev_attrs[] = {
376 &dev_attr_sw_activity,
377 &dev_attr_unload_heads,
378 NULL
379 };
380
381 static struct scsi_host_template ahci_sht = {
382 ATA_NCQ_SHT(DRV_NAME),
383 .can_queue = AHCI_MAX_CMDS - 1,
384 .sg_tablesize = AHCI_MAX_SG,
385 .dma_boundary = AHCI_DMA_BOUNDARY,
386 .shost_attrs = ahci_shost_attrs,
387 .sdev_attrs = ahci_sdev_attrs,
388 };
389
390 static struct ata_port_operations ahci_ops = {
391 .inherits = &sata_pmp_port_ops,
392
393 .qc_defer = sata_pmp_qc_defer_cmd_switch,
394 .qc_prep = ahci_qc_prep,
395 .qc_issue = ahci_qc_issue,
396 .qc_fill_rtf = ahci_qc_fill_rtf,
397
398 .freeze = ahci_freeze,
399 .thaw = ahci_thaw,
400 .softreset = ahci_softreset,
401 .hardreset = ahci_hardreset,
402 .postreset = ahci_postreset,
403 .pmp_softreset = ahci_softreset,
404 .error_handler = ahci_error_handler,
405 .post_internal_cmd = ahci_post_internal_cmd,
406 .dev_config = ahci_dev_config,
407
408 .scr_read = ahci_scr_read,
409 .scr_write = ahci_scr_write,
410 .pmp_attach = ahci_pmp_attach,
411 .pmp_detach = ahci_pmp_detach,
412
413 .enable_pm = ahci_enable_alpm,
414 .disable_pm = ahci_disable_alpm,
415 .em_show = ahci_led_show,
416 .em_store = ahci_led_store,
417 .sw_activity_show = ahci_activity_show,
418 .sw_activity_store = ahci_activity_store,
419 #ifdef CONFIG_PM
420 .port_suspend = ahci_port_suspend,
421 .port_resume = ahci_port_resume,
422 #endif
423 .port_start = ahci_port_start,
424 .port_stop = ahci_port_stop,
425 };
426
427 static struct ata_port_operations ahci_vt8251_ops = {
428 .inherits = &ahci_ops,
429 .hardreset = ahci_vt8251_hardreset,
430 };
431
432 static struct ata_port_operations ahci_p5wdh_ops = {
433 .inherits = &ahci_ops,
434 .hardreset = ahci_p5wdh_hardreset,
435 };
436
437 static struct ata_port_operations ahci_sb600_ops = {
438 .inherits = &ahci_ops,
439 .softreset = ahci_sb600_softreset,
440 .pmp_softreset = ahci_sb600_softreset,
441 };
442
443 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
444
445 static const struct ata_port_info ahci_port_info[] = {
446 [board_ahci] =
447 {
448 .flags = AHCI_FLAG_COMMON,
449 .pio_mask = ATA_PIO4,
450 .udma_mask = ATA_UDMA6,
451 .port_ops = &ahci_ops,
452 },
453 [board_ahci_vt8251] =
454 {
455 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
456 .flags = AHCI_FLAG_COMMON,
457 .pio_mask = ATA_PIO4,
458 .udma_mask = ATA_UDMA6,
459 .port_ops = &ahci_vt8251_ops,
460 },
461 [board_ahci_ign_iferr] =
462 {
463 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
464 .flags = AHCI_FLAG_COMMON,
465 .pio_mask = ATA_PIO4,
466 .udma_mask = ATA_UDMA6,
467 .port_ops = &ahci_ops,
468 },
469 [board_ahci_sb600] =
470 {
471 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
472 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
473 AHCI_HFLAG_32BIT_ONLY),
474 .flags = AHCI_FLAG_COMMON,
475 .pio_mask = ATA_PIO4,
476 .udma_mask = ATA_UDMA6,
477 .port_ops = &ahci_sb600_ops,
478 },
479 [board_ahci_mv] =
480 {
481 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
482 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
483 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
484 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
485 .pio_mask = ATA_PIO4,
486 .udma_mask = ATA_UDMA6,
487 .port_ops = &ahci_ops,
488 },
489 [board_ahci_sb700] = /* for SB700 and SB800 */
490 {
491 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
492 .flags = AHCI_FLAG_COMMON,
493 .pio_mask = ATA_PIO4,
494 .udma_mask = ATA_UDMA6,
495 .port_ops = &ahci_sb600_ops,
496 },
497 [board_ahci_mcp65] =
498 {
499 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
500 .flags = AHCI_FLAG_COMMON,
501 .pio_mask = ATA_PIO4,
502 .udma_mask = ATA_UDMA6,
503 .port_ops = &ahci_ops,
504 },
505 [board_ahci_nopmp] =
506 {
507 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
508 .flags = AHCI_FLAG_COMMON,
509 .pio_mask = ATA_PIO4,
510 .udma_mask = ATA_UDMA6,
511 .port_ops = &ahci_ops,
512 },
513 [board_ahci_yesncq] =
514 {
515 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
516 .flags = AHCI_FLAG_COMMON,
517 .pio_mask = ATA_PIO4,
518 .udma_mask = ATA_UDMA6,
519 .port_ops = &ahci_ops,
520 },
521 [board_ahci_nosntf] =
522 {
523 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
524 .flags = AHCI_FLAG_COMMON,
525 .pio_mask = ATA_PIO4,
526 .udma_mask = ATA_UDMA6,
527 .port_ops = &ahci_ops,
528 },
529 };
530
531 static const struct pci_device_id ahci_pci_tbl[] = {
532 /* Intel */
533 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
534 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
535 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
536 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
537 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
538 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
539 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
540 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
541 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
542 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
543 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
544 { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
545 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
546 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
547 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
548 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
549 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
550 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
551 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
552 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
553 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
554 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
555 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
556 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
557 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
558 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
559 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
560 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
561 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
562 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
563 { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
564 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
565 { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
566 { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
567 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
568 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
569 { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
570 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
571 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
572 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
573
574 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
575 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
576 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
577
578 /* ATI */
579 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
580 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
581 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
582 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
583 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
584 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
585 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
586
587 /* AMD */
588 { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
589 /* AMD is using RAID class only for ahci controllers */
590 { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
591 PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
592
593 /* VIA */
594 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
595 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
596
597 /* NVIDIA */
598 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
599 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
600 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
601 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
602 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
603 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
604 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
605 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
606 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
607 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
608 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
609 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
610 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
611 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
612 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
613 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
614 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
615 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
616 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
617 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
618 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
619 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
620 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
621 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
622 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
623 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
624 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
625 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
626 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
627 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
628 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
629 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
630 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
631 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
632 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
633 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
634 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
635 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
636 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
637 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
638 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
639 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
640 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
641 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
642 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
643 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
644 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
645 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
646 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
647 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
648 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
649 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
650 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
651 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
652 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
653 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
654 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
655 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
656 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
657 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
658 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
659 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
660 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
661 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
662 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
663 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
664 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
665 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
666 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
667
668 /* SiS */
669 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
670 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
671 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
672
673 /* Marvell */
674 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
675 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
676
677 /* Promise */
678 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
679
680 /* Generic, PCI class code for AHCI */
681 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
682 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
683
684 { } /* terminate list */
685 };
686
687
688 static struct pci_driver ahci_pci_driver = {
689 .name = DRV_NAME,
690 .id_table = ahci_pci_tbl,
691 .probe = ahci_init_one,
692 .remove = ata_pci_remove_one,
693 #ifdef CONFIG_PM
694 .suspend = ahci_pci_device_suspend,
695 .resume = ahci_pci_device_resume,
696 #endif
697 };
698
699 static int ahci_em_messages = 1;
700 module_param(ahci_em_messages, int, 0444);
701 /* add other LED protocol types when they become supported */
702 MODULE_PARM_DESC(ahci_em_messages,
703 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED)");
704
705 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
706 static int marvell_enable;
707 #else
708 static int marvell_enable = 1;
709 #endif
710 module_param(marvell_enable, int, 0644);
711 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
712
713
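/* CAP.NP (bits 4:0) holds the number of implemented ports minus one */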
714 static inline int ahci_nr_ports(u32 cap)
715 {
716 return (cap & 0x1f) + 1;
717 }
718
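/* per-port register banks start at offset 0x100 and are 0x80 bytes apart */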
719 static inline void __iomem *__ahci_port_base(struct ata_host *host,
720 unsigned int port_no)
721 {
722 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
723
724 return mmio + 0x100 + (port_no * 0x80);
725 }
726
727 static inline void __iomem *ahci_port_base(struct ata_port *ap)
728 {
729 return __ahci_port_base(ap->host, ap->port_no);
730 }
731
732 static void ahci_enable_ahci(void __iomem *mmio)
733 {
734 int i;
735 u32 tmp;
736
737 /* turn on AHCI_EN */
738 tmp = readl(mmio + HOST_CTL);
739 if (tmp & HOST_AHCI_EN)
740 return;
741
742 /* Some controllers need AHCI_EN to be written multiple times.
743 * Try a few times before giving up.
744 */
745 for (i = 0; i < 5; i++) {
746 tmp |= HOST_AHCI_EN;
747 writel(tmp, mmio + HOST_CTL);
748 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
749 if (tmp & HOST_AHCI_EN)
750 return;
751 msleep(10);
752 }
753
754 WARN_ON(1);
755 }
756
757 static ssize_t ahci_show_host_caps(struct device *dev,
758 struct device_attribute *attr, char *buf)
759 {
760 struct Scsi_Host *shost = class_to_shost(dev);
761 struct ata_port *ap = ata_shost_to_port(shost);
762 struct ahci_host_priv *hpriv = ap->host->private_data;
763
764 return sprintf(buf, "%x\n", hpriv->cap);
765 }
766
767 static ssize_t ahci_show_host_cap2(struct device *dev,
768 struct device_attribute *attr, char *buf)
769 {
770 struct Scsi_Host *shost = class_to_shost(dev);
771 struct ata_port *ap = ata_shost_to_port(shost);
772 struct ahci_host_priv *hpriv = ap->host->private_data;
773
774 return sprintf(buf, "%x\n", hpriv->cap2);
775 }
776
777 static ssize_t ahci_show_host_version(struct device *dev,
778 struct device_attribute *attr, char *buf)
779 {
780 struct Scsi_Host *shost = class_to_shost(dev);
781 struct ata_port *ap = ata_shost_to_port(shost);
782 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
783
784 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
785 }
786
787 static ssize_t ahci_show_port_cmd(struct device *dev,
788 struct device_attribute *attr, char *buf)
789 {
790 struct Scsi_Host *shost = class_to_shost(dev);
791 struct ata_port *ap = ata_shost_to_port(shost);
792 void __iomem *port_mmio = ahci_port_base(ap);
793
794 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
795 }
796
797 /**
798 * ahci_save_initial_config - Save and fixup initial config values
799 * @pdev: target PCI device
800 * @hpriv: host private area to store config values
801 *
802 * Some registers containing configuration info might be set up by
803 * BIOS and might be cleared on reset. This function saves the
804 * initial values of those registers into @hpriv such that they
805 * can be restored after controller reset.
806 *
807 * If inconsistent, config values are fixed up by this function.
808 *
809 * LOCKING:
810 * None.
811 */
812 static void ahci_save_initial_config(struct pci_dev *pdev,
813 struct ahci_host_priv *hpriv)
814 {
815 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
816 u32 cap, cap2, vers, port_map;
817 int i;
818 int mv;
819
820 /* make sure AHCI mode is enabled before accessing CAP */
821 ahci_enable_ahci(mmio);
822
823 /* Values prefixed with saved_ are written back to host after
824 * reset. Values without are used for driver operation.
825 */
826 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
827 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
828
829 /* CAP2 register is only defined for AHCI 1.2 and later */
830 vers = readl(mmio + HOST_VERSION);
831 if ((vers >> 16) > 1 ||
832 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
833 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
834 else
835 hpriv->saved_cap2 = cap2 = 0;
836
837 /* some chips have errata preventing 64bit use */
838 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
839 dev_printk(KERN_INFO, &pdev->dev,
840 "controller can't do 64bit DMA, forcing 32bit\n");
841 cap &= ~HOST_CAP_64;
842 }
843
844 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
845 dev_printk(KERN_INFO, &pdev->dev,
846 "controller can't do NCQ, turning off CAP_NCQ\n");
847 cap &= ~HOST_CAP_NCQ;
848 }
849
850 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
851 dev_printk(KERN_INFO, &pdev->dev,
852 "controller can do NCQ, turning on CAP_NCQ\n");
853 cap |= HOST_CAP_NCQ;
854 }
855
856 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
857 dev_printk(KERN_INFO, &pdev->dev,
858 "controller can't do PMP, turning off CAP_PMP\n");
859 cap &= ~HOST_CAP_PMP;
860 }
861
862 if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
863 dev_printk(KERN_INFO, &pdev->dev,
864 "controller can't do SNTF, turning off CAP_SNTF\n");
865 cap &= ~HOST_CAP_SNTF;
866 }
867
868 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
869 port_map != 1) {
870 dev_printk(KERN_INFO, &pdev->dev,
871 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
872 port_map, 1);
873 port_map = 1;
874 }
875
876 /*
877 * Temporary Marvell 6145 hack: PATA port presence
878 * is asserted through the standard AHCI port
879 * presence register, as bit 4 (counting from 0)
880 */
881 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
882 if (pdev->device == 0x6121)
883 mv = 0x3;
884 else
885 mv = 0xf;
886 dev_printk(KERN_ERR, &pdev->dev,
887 "MV_AHCI HACK: port_map %x -> %x\n",
888 port_map,
889 port_map & mv);
890 dev_printk(KERN_ERR, &pdev->dev,
891 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
892
893 port_map &= mv;
894 }
895
896 /* cross check port_map and cap.n_ports */
897 if (port_map) {
898 int map_ports = 0;
899
900 for (i = 0; i < AHCI_MAX_PORTS; i++)
901 if (port_map & (1 << i))
902 map_ports++;
903
904 /* If PI has more ports than n_ports, whine, clear
905 * port_map and let it be generated from n_ports.
906 */
907 if (map_ports > ahci_nr_ports(cap)) {
908 dev_printk(KERN_WARNING, &pdev->dev,
909 "implemented port map (0x%x) contains more "
910 "ports than nr_ports (%u), using nr_ports\n",
911 port_map, ahci_nr_ports(cap));
912 port_map = 0;
913 }
914 }
915
916 /* fabricate port_map from cap.nr_ports */
917 if (!port_map) {
918 port_map = (1 << ahci_nr_ports(cap)) - 1;
919 dev_printk(KERN_WARNING, &pdev->dev,
920 "forcing PORTS_IMPL to 0x%x\n", port_map);
921
922 /* write the fixed up value to the PI register */
923 hpriv->saved_port_map = port_map;
924 }
925
926 /* record values to use during operation */
927 hpriv->cap = cap;
928 hpriv->cap2 = cap2;
929 hpriv->port_map = port_map;
930 }
931
932 /**
933 * ahci_restore_initial_config - Restore initial config
934 * @host: target ATA host
935 *
936 * Restore initial config stored by ahci_save_initial_config().
937 *
938 * LOCKING:
939 * None.
940 */
941 static void ahci_restore_initial_config(struct ata_host *host)
942 {
943 struct ahci_host_priv *hpriv = host->private_data;
944 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
945
946 writel(hpriv->saved_cap, mmio + HOST_CAP);
947 if (hpriv->saved_cap2)
948 writel(hpriv->saved_cap2, mmio + HOST_CAP2);
949 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
950 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
951 }
952
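/*
 * Map a generic SCR index to the per-port PxSSTS/PxSCTL/PxSERR/PxSACT/PxSNTF
 * offset.  Returns 0 for unsupported registers; SNotification is only
 * available when CAP.SNTF is set.
 */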
953 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
954 {
955 static const int offset[] = {
956 [SCR_STATUS] = PORT_SCR_STAT,
957 [SCR_CONTROL] = PORT_SCR_CTL,
958 [SCR_ERROR] = PORT_SCR_ERR,
959 [SCR_ACTIVE] = PORT_SCR_ACT,
960 [SCR_NOTIFICATION] = PORT_SCR_NTF,
961 };
962 struct ahci_host_priv *hpriv = ap->host->private_data;
963
964 if (sc_reg < ARRAY_SIZE(offset) &&
965 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
966 return offset[sc_reg];
967 return 0;
968 }
969
970 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
971 {
972 void __iomem *port_mmio = ahci_port_base(link->ap);
973 int offset = ahci_scr_offset(link->ap, sc_reg);
974
975 if (offset) {
976 *val = readl(port_mmio + offset);
977 return 0;
978 }
979 return -EINVAL;
980 }
981
982 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
983 {
984 void __iomem *port_mmio = ahci_port_base(link->ap);
985 int offset = ahci_scr_offset(link->ap, sc_reg);
986
987 if (offset) {
988 writel(val, port_mmio + offset);
989 return 0;
990 }
991 return -EINVAL;
992 }
993
994 static void ahci_start_engine(struct ata_port *ap)
995 {
996 void __iomem *port_mmio = ahci_port_base(ap);
997 u32 tmp;
998
999 /* start DMA */
1000 tmp = readl(port_mmio + PORT_CMD);
1001 tmp |= PORT_CMD_START;
1002 writel(tmp, port_mmio + PORT_CMD);
1003 readl(port_mmio + PORT_CMD); /* flush */
1004 }
1005
1006 static int ahci_stop_engine(struct ata_port *ap)
1007 {
1008 void __iomem *port_mmio = ahci_port_base(ap);
1009 u32 tmp;
1010
1011 tmp = readl(port_mmio + PORT_CMD);
1012
1013 /* check if the HBA is idle */
1014 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
1015 return 0;
1016
1017 /* setting HBA to idle */
1018 tmp &= ~PORT_CMD_START;
1019 writel(tmp, port_mmio + PORT_CMD);
1020
1021 /* wait for engine to stop. This could be as long as 500 msec */
1022 tmp = ata_wait_register(port_mmio + PORT_CMD,
1023 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
1024 if (tmp & PORT_CMD_LIST_ON)
1025 return -EIO;
1026
1027 return 0;
1028 }
1029
1030 static void ahci_start_fis_rx(struct ata_port *ap)
1031 {
1032 void __iomem *port_mmio = ahci_port_base(ap);
1033 struct ahci_host_priv *hpriv = ap->host->private_data;
1034 struct ahci_port_priv *pp = ap->private_data;
1035 u32 tmp;
1036
1037 /* set FIS registers */
1038 if (hpriv->cap & HOST_CAP_64)
1039 writel((pp->cmd_slot_dma >> 16) >> 16,
1040 port_mmio + PORT_LST_ADDR_HI);
1041 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
1042
1043 if (hpriv->cap & HOST_CAP_64)
1044 writel((pp->rx_fis_dma >> 16) >> 16,
1045 port_mmio + PORT_FIS_ADDR_HI);
1046 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
1047
1048 /* enable FIS reception */
1049 tmp = readl(port_mmio + PORT_CMD);
1050 tmp |= PORT_CMD_FIS_RX;
1051 writel(tmp, port_mmio + PORT_CMD);
1052
1053 /* flush */
1054 readl(port_mmio + PORT_CMD);
1055 }
1056
1057 static int ahci_stop_fis_rx(struct ata_port *ap)
1058 {
1059 void __iomem *port_mmio = ahci_port_base(ap);
1060 u32 tmp;
1061
1062 /* disable FIS reception */
1063 tmp = readl(port_mmio + PORT_CMD);
1064 tmp &= ~PORT_CMD_FIS_RX;
1065 writel(tmp, port_mmio + PORT_CMD);
1066
1067 /* wait for completion, spec says 500ms, give it 1000 */
1068 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1069 PORT_CMD_FIS_ON, 10, 1000);
1070 if (tmp & PORT_CMD_FIS_ON)
1071 return -EBUSY;
1072
1073 return 0;
1074 }
1075
1076 static void ahci_power_up(struct ata_port *ap)
1077 {
1078 struct ahci_host_priv *hpriv = ap->host->private_data;
1079 void __iomem *port_mmio = ahci_port_base(ap);
1080 u32 cmd;
1081
1082 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1083
1084 /* spin up device */
1085 if (hpriv->cap & HOST_CAP_SSS) {
1086 cmd |= PORT_CMD_SPIN_UP;
1087 writel(cmd, port_mmio + PORT_CMD);
1088 }
1089
1090 /* wake up link */
1091 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
1092 }
1093
1094 static void ahci_disable_alpm(struct ata_port *ap)
1095 {
1096 struct ahci_host_priv *hpriv = ap->host->private_data;
1097 void __iomem *port_mmio = ahci_port_base(ap);
1098 u32 cmd;
1099 struct ahci_port_priv *pp = ap->private_data;
1100
1101 /* IPM bits should be disabled by libata-core */
1102 /* get the existing command bits */
1103 cmd = readl(port_mmio + PORT_CMD);
1104
1105 /* disable ALPM and ASP */
1106 cmd &= ~PORT_CMD_ASP;
1107 cmd &= ~PORT_CMD_ALPE;
1108
1109 /* force the interface back to active */
1110 cmd |= PORT_CMD_ICC_ACTIVE;
1111
1112 /* write out new cmd value */
1113 writel(cmd, port_mmio + PORT_CMD);
1114 cmd = readl(port_mmio + PORT_CMD);
1115
1116 /* wait 10ms to be sure we've come out of any low power state */
1117 msleep(10);
1118
1119 /* clear out any PhyRdy stuff from interrupt status */
1120 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1121
1122 /* go ahead and clean out PhyRdy Change from Serror too */
1123 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1124
1125 /*
1126 * Clear the flag that told us to ignore all PhyRdy
1127 * state changes
1128 */
1129 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1130
1131 /*
1132 * Enable interrupts on Phy Ready.
1133 */
1134 pp->intr_mask |= PORT_IRQ_PHYRDY;
1135 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1136
1137 /*
1138 * don't change the link pm policy - we can be called
1139 * just to turn off link pm temporarily
1140 */
1141 }
1142
1143 static int ahci_enable_alpm(struct ata_port *ap,
1144 enum link_pm policy)
1145 {
1146 struct ahci_host_priv *hpriv = ap->host->private_data;
1147 void __iomem *port_mmio = ahci_port_base(ap);
1148 u32 cmd;
1149 struct ahci_port_priv *pp = ap->private_data;
1150 u32 asp;
1151
1152 /* Make sure the host is capable of link power management */
1153 if (!(hpriv->cap & HOST_CAP_ALPM))
1154 return -EINVAL;
1155
1156 switch (policy) {
1157 case MAX_PERFORMANCE:
1158 case NOT_AVAILABLE:
1159 /*
1160 * if we came here with NOT_AVAILABLE,
1161 * it just means this is the first time we
1162 * have tried to enable - default to max performance,
1163 * and let the user go to lower power modes on request.
1164 */
1165 ahci_disable_alpm(ap);
1166 return 0;
1167 case MIN_POWER:
1168 /* configure HBA to enter SLUMBER */
1169 asp = PORT_CMD_ASP;
1170 break;
1171 case MEDIUM_POWER:
1172 /* configure HBA to enter PARTIAL */
1173 asp = 0;
1174 break;
1175 default:
1176 return -EINVAL;
1177 }
1178
1179 /*
1180 * Disable interrupts on Phy Ready. This keeps us from
1181 * getting woken up due to spurious phy ready interrupts.
1182 * TBD - Hot plug should be done via polling now, is
1183 * that even supported?
1184 */
1185 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1186 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1187
1188 /*
1189 * Set a flag to indicate that we should ignore all PhyRdy
1190 * state changes since these can happen now whenever we
1191 * change link state
1192 */
1193 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1194
1195 /* get the existing command bits */
1196 cmd = readl(port_mmio + PORT_CMD);
1197
1198 /*
1199 * Set ASP based on Policy
1200 */
1201 cmd |= asp;
1202
1203 /*
1204 * Setting this bit will instruct the HBA to aggressively
1205 * enter a lower power link state when it's appropriate and
1206 * based on the value set above for ASP
1207 */
1208 cmd |= PORT_CMD_ALPE;
1209
1210 /* write out new cmd value */
1211 writel(cmd, port_mmio + PORT_CMD);
1212 cmd = readl(port_mmio + PORT_CMD);
1213
1214 /* IPM bits should be set by libata-core */
1215 return 0;
1216 }
1217
1218 #ifdef CONFIG_PM
1219 static void ahci_power_down(struct ata_port *ap)
1220 {
1221 struct ahci_host_priv *hpriv = ap->host->private_data;
1222 void __iomem *port_mmio = ahci_port_base(ap);
1223 u32 cmd, scontrol;
1224
1225 if (!(hpriv->cap & HOST_CAP_SSS))
1226 return;
1227
1228 /* put device into listen mode, first set PxSCTL.DET to 0 */
1229 scontrol = readl(port_mmio + PORT_SCR_CTL);
1230 scontrol &= ~0xf;
1231 writel(scontrol, port_mmio + PORT_SCR_CTL);
1232
1233 /* then set PxCMD.SUD to 0 */
1234 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1235 cmd &= ~PORT_CMD_SPIN_UP;
1236 writel(cmd, port_mmio + PORT_CMD);
1237 }
1238 #endif
1239
1240 static void ahci_start_port(struct ata_port *ap)
1241 {
1242 struct ahci_port_priv *pp = ap->private_data;
1243 struct ata_link *link;
1244 struct ahci_em_priv *emp;
1245 ssize_t rc;
1246 int i;
1247
1248 /* enable FIS reception */
1249 ahci_start_fis_rx(ap);
1250
1251 /* enable DMA */
1252 ahci_start_engine(ap);
1253
1254 /* turn on LEDs */
1255 if (ap->flags & ATA_FLAG_EM) {
1256 ata_for_each_link(link, ap, EDGE) {
1257 emp = &pp->em_priv[link->pmp];
1258
1259 /* EM Transmit bit may be busy during init */
1260 for (i = 0; i < EM_MAX_RETRY; i++) {
1261 rc = ahci_transmit_led_message(ap,
1262 emp->led_state,
1263 4);
1264 if (rc == -EBUSY)
1265 msleep(1);
1266 else
1267 break;
1268 }
1269 }
1270 }
1271
1272 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1273 ata_for_each_link(link, ap, EDGE)
1274 ahci_init_sw_activity(link);
1275
1276 }
1277
1278 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1279 {
1280 int rc;
1281
1282 /* disable DMA */
1283 rc = ahci_stop_engine(ap);
1284 if (rc) {
1285 *emsg = "failed to stop engine";
1286 return rc;
1287 }
1288
1289 /* disable FIS reception */
1290 rc = ahci_stop_fis_rx(ap);
1291 if (rc) {
1292 *emsg = "failed to stop FIS RX";
1293 return rc;
1294 }
1295
1296 return 0;
1297 }
1298
1299 static int ahci_reset_controller(struct ata_host *host)
1300 {
1301 struct pci_dev *pdev = to_pci_dev(host->dev);
1302 struct ahci_host_priv *hpriv = host->private_data;
1303 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1304 u32 tmp;
1305
1306 /* we must be in AHCI mode, before using anything
1307 * AHCI-specific, such as HOST_RESET.
1308 */
1309 ahci_enable_ahci(mmio);
1310
1311 /* global controller reset */
1312 if (!ahci_skip_host_reset) {
1313 tmp = readl(mmio + HOST_CTL);
1314 if ((tmp & HOST_RESET) == 0) {
1315 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1316 readl(mmio + HOST_CTL); /* flush */
1317 }
1318
1319 /*
1320 * to perform host reset, OS should set HOST_RESET
1321 * and poll until this bit is read to be "0".
1322 * reset must complete within 1 second, or
1323 * the hardware should be considered fried.
1324 */
1325 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1326 HOST_RESET, 10, 1000);
1327
1328 if (tmp & HOST_RESET) {
1329 dev_printk(KERN_ERR, host->dev,
1330 "controller reset failed (0x%x)\n", tmp);
1331 return -EIO;
1332 }
1333
1334 /* turn on AHCI mode */
1335 ahci_enable_ahci(mmio);
1336
1337 /* Some registers might be cleared on reset. Restore
1338 * initial values.
1339 */
1340 ahci_restore_initial_config(host);
1341 } else
1342 dev_printk(KERN_INFO, host->dev,
1343 "skipping global host reset\n");
1344
1345 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1346 u16 tmp16;
1347
1348 /* configure PCS */
1349 pci_read_config_word(pdev, 0x92, &tmp16);
1350 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1351 tmp16 |= hpriv->port_map;
1352 pci_write_config_word(pdev, 0x92, tmp16);
1353 }
1354 }
1355
1356 return 0;
1357 }
1358
1359 static void ahci_sw_activity(struct ata_link *link)
1360 {
1361 struct ata_port *ap = link->ap;
1362 struct ahci_port_priv *pp = ap->private_data;
1363 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1364
1365 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1366 return;
1367
1368 emp->activity++;
1369 if (!timer_pending(&emp->timer))
1370 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1371 }
1372
1373 static void ahci_sw_activity_blink(unsigned long arg)
1374 {
1375 struct ata_link *link = (struct ata_link *)arg;
1376 struct ata_port *ap = link->ap;
1377 struct ahci_port_priv *pp = ap->private_data;
1378 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1379 unsigned long led_message = emp->led_state;
1380 u32 activity_led_state;
1381 unsigned long flags;
1382
1383 led_message &= EM_MSG_LED_VALUE;
1384 led_message |= ap->port_no | (link->pmp << 8);
1385
1386 /* check to see if we've had activity. If so,
1387 * toggle state of LED and reset timer. If not,
1388 * turn LED to desired idle state.
1389 */
1390 spin_lock_irqsave(ap->lock, flags);
1391 if (emp->saved_activity != emp->activity) {
1392 emp->saved_activity = emp->activity;
1393 /* get the current LED state */
1394 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1395
1396 if (activity_led_state)
1397 activity_led_state = 0;
1398 else
1399 activity_led_state = 1;
1400
1401 /* clear old state */
1402 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1403
1404 /* toggle state */
1405 led_message |= (activity_led_state << 16);
1406 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1407 } else {
1408 /* switch to idle */
1409 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1410 if (emp->blink_policy == BLINK_OFF)
1411 led_message |= (1 << 16);
1412 }
1413 spin_unlock_irqrestore(ap->lock, flags);
1414 ahci_transmit_led_message(ap, led_message, 4);
1415 }
1416
1417 static void ahci_init_sw_activity(struct ata_link *link)
1418 {
1419 struct ata_port *ap = link->ap;
1420 struct ahci_port_priv *pp = ap->private_data;
1421 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1422
1423 /* init activity stats, setup timer */
1424 emp->saved_activity = emp->activity = 0;
1425 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1426
1427 /* check our blink policy and set flag for link if it's enabled */
1428 if (emp->blink_policy)
1429 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1430 }
1431
1432 static int ahci_reset_em(struct ata_host *host)
1433 {
1434 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1435 u32 em_ctl;
1436
1437 em_ctl = readl(mmio + HOST_EM_CTL);
1438 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1439 return -EINVAL;
1440
1441 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1442 return 0;
1443 }
1444
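/*
 * Compose a two-dword LED message in the enclosure management transmit
 * buffer and ask the HBA to send it by setting EM_CTL.TM.  Returns -EBUSY
 * if a previous message is still being transmitted.
 */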
1445 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1446 ssize_t size)
1447 {
1448 struct ahci_host_priv *hpriv = ap->host->private_data;
1449 struct ahci_port_priv *pp = ap->private_data;
1450 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1451 u32 em_ctl;
1452 u32 message[] = {0, 0};
1453 unsigned long flags;
1454 int pmp;
1455 struct ahci_em_priv *emp;
1456
1457 /* get the slot number from the message */
1458 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1459 if (pmp < EM_MAX_SLOTS)
1460 emp = &pp->em_priv[pmp];
1461 else
1462 return -EINVAL;
1463
1464 spin_lock_irqsave(ap->lock, flags);
1465
1466 /*
1467 * if we are still busy transmitting a previous message,
1468 * do not allow a new one to be sent
1469 */
1470 em_ctl = readl(mmio + HOST_EM_CTL);
1471 if (em_ctl & EM_CTL_TM) {
1472 spin_unlock_irqrestore(ap->lock, flags);
1473 return -EBUSY;
1474 }
1475
1476 /*
1477 * create message header - this is all zero except for
1478 * the message size, which is 4 bytes.
1479 */
1480 message[0] |= (4 << 8);
1481
1482 /* ignore 0:4 of byte zero, fill in port info yourself */
1483 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1484
1485 /* write message to EM_LOC */
1486 writel(message[0], mmio + hpriv->em_loc);
1487 writel(message[1], mmio + hpriv->em_loc+4);
1488
1489 /* save off new led state for port/slot */
1490 emp->led_state = state;
1491
1492 /*
1493 * tell hardware to transmit the message
1494 */
1495 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1496
1497 spin_unlock_irqrestore(ap->lock, flags);
1498 return size;
1499 }
1500
1501 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1502 {
1503 struct ahci_port_priv *pp = ap->private_data;
1504 struct ata_link *link;
1505 struct ahci_em_priv *emp;
1506 int rc = 0;
1507
1508 ata_for_each_link(link, ap, EDGE) {
1509 emp = &pp->em_priv[link->pmp];
1510 rc += sprintf(buf, "%lx\n", emp->led_state);
1511 }
1512 return rc;
1513 }
1514
1515 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1516 size_t size)
1517 {
1518 int state;
1519 int pmp;
1520 struct ahci_port_priv *pp = ap->private_data;
1521 struct ahci_em_priv *emp;
1522
1523 state = simple_strtoul(buf, NULL, 0);
1524
1525 /* get the slot number from the message */
1526 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1527 if (pmp < EM_MAX_SLOTS)
1528 emp = &pp->em_priv[pmp];
1529 else
1530 return -EINVAL;
1531
1532 /* mask off the activity bits if we are in sw_activity
1533 * mode; the user should turn off sw_activity before setting
1534 * the activity LED through em_message
1535 */
1536 if (emp->blink_policy)
1537 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1538
1539 return ahci_transmit_led_message(ap, state, size);
1540 }
1541
1542 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1543 {
1544 struct ata_link *link = dev->link;
1545 struct ata_port *ap = link->ap;
1546 struct ahci_port_priv *pp = ap->private_data;
1547 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1548 u32 port_led_state = emp->led_state;
1549
1550 /* save the desired Activity LED behavior */
1551 if (val == OFF) {
1552 /* clear LFLAG */
1553 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1554
1555 /* set the LED to OFF */
1556 port_led_state &= EM_MSG_LED_VALUE_OFF;
1557 port_led_state |= (ap->port_no | (link->pmp << 8));
1558 ahci_transmit_led_message(ap, port_led_state, 4);
1559 } else {
1560 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1561 if (val == BLINK_OFF) {
1562 /* set LED to ON for idle */
1563 port_led_state &= EM_MSG_LED_VALUE_OFF;
1564 port_led_state |= (ap->port_no | (link->pmp << 8));
1565 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1566 ahci_transmit_led_message(ap, port_led_state, 4);
1567 }
1568 }
1569 emp->blink_policy = val;
1570 return 0;
1571 }
1572
1573 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1574 {
1575 struct ata_link *link = dev->link;
1576 struct ata_port *ap = link->ap;
1577 struct ahci_port_priv *pp = ap->private_data;
1578 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1579
1580 /* display the saved value of activity behavior for this
1581 * disk.
1582 */
1583 return sprintf(buf, "%d\n", emp->blink_policy);
1584 }
1585
1586 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1587 int port_no, void __iomem *mmio,
1588 void __iomem *port_mmio)
1589 {
1590 const char *emsg = NULL;
1591 int rc;
1592 u32 tmp;
1593
1594 /* make sure port is not active */
1595 rc = ahci_deinit_port(ap, &emsg);
1596 if (rc)
1597 dev_printk(KERN_WARNING, &pdev->dev,
1598 "%s (%d)\n", emsg, rc);
1599
1600 /* clear SError */
1601 tmp = readl(port_mmio + PORT_SCR_ERR);
1602 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1603 writel(tmp, port_mmio + PORT_SCR_ERR);
1604
1605 /* clear port IRQ */
1606 tmp = readl(port_mmio + PORT_IRQ_STAT);
1607 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1608 if (tmp)
1609 writel(tmp, port_mmio + PORT_IRQ_STAT);
1610
1611 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1612 }
1613
1614 static void ahci_init_controller(struct ata_host *host)
1615 {
1616 struct ahci_host_priv *hpriv = host->private_data;
1617 struct pci_dev *pdev = to_pci_dev(host->dev);
1618 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1619 int i;
1620 void __iomem *port_mmio;
1621 u32 tmp;
1622 int mv;
1623
1624 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1625 if (pdev->device == 0x6121)
1626 mv = 2;
1627 else
1628 mv = 4;
1629 port_mmio = __ahci_port_base(host, mv);
1630
1631 writel(0, port_mmio + PORT_IRQ_MASK);
1632
1633 /* clear port IRQ */
1634 tmp = readl(port_mmio + PORT_IRQ_STAT);
1635 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1636 if (tmp)
1637 writel(tmp, port_mmio + PORT_IRQ_STAT);
1638 }
1639
1640 for (i = 0; i < host->n_ports; i++) {
1641 struct ata_port *ap = host->ports[i];
1642
1643 port_mmio = ahci_port_base(ap);
1644 if (ata_port_is_dummy(ap))
1645 continue;
1646
1647 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1648 }
1649
1650 tmp = readl(mmio + HOST_CTL);
1651 VPRINTK("HOST_CTL 0x%x\n", tmp);
1652 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1653 tmp = readl(mmio + HOST_CTL);
1654 VPRINTK("HOST_CTL 0x%x\n", tmp);
1655 }
1656
1657 static void ahci_dev_config(struct ata_device *dev)
1658 {
1659 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1660
1661 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1662 dev->max_sectors = 255;
1663 ata_dev_printk(dev, KERN_INFO,
1664 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1665 }
1666 }
1667
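/* read the device signature latched in PxSIG and let libata classify it */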
1668 static unsigned int ahci_dev_classify(struct ata_port *ap)
1669 {
1670 void __iomem *port_mmio = ahci_port_base(ap);
1671 struct ata_taskfile tf;
1672 u32 tmp;
1673
1674 tmp = readl(port_mmio + PORT_SIG);
1675 tf.lbah = (tmp >> 24) & 0xff;
1676 tf.lbam = (tmp >> 16) & 0xff;
1677 tf.lbal = (tmp >> 8) & 0xff;
1678 tf.nsect = (tmp) & 0xff;
1679
1680 return ata_dev_classify(&tf);
1681 }
1682
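/*
 * Initialize one command header: option flags in the first dword, the
 * byte-count/status dword cleared, and the physical address of the slot's
 * command table.
 */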
1683 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1684 u32 opts)
1685 {
1686 dma_addr_t cmd_tbl_dma;
1687
1688 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1689
1690 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1691 pp->cmd_slot[tag].status = 0;
1692 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1693 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1694 }
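
/*
 * For illustration: the command table address is split into two 32-bit
 * halves because dma_addr_t may be 32 or 64 bits wide.  The double
 * shift "(x >> 16) >> 16" is used instead of "x >> 32" so the code
 * stays well defined when dma_addr_t is only 32 bits wide (shifting a
 * 32-bit value by 32 is undefined in C).  With a hypothetical
 * cmd_tbl_dma of 0x123456780:
 *
 *	tbl_addr    = 0x23456780
 *	tbl_addr_hi = 0x00000001
 */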
1695
1696 static int ahci_kick_engine(struct ata_port *ap)
1697 {
1698 void __iomem *port_mmio = ahci_port_base(ap);
1699 struct ahci_host_priv *hpriv = ap->host->private_data;
1700 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1701 u32 tmp;
1702 int busy, rc;
1703
1704 /* stop engine */
1705 rc = ahci_stop_engine(ap);
1706 if (rc)
1707 goto out_restart;
1708
1709 /* need to do CLO?
1710 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1711 */
1712 busy = status & (ATA_BUSY | ATA_DRQ);
1713 if (!busy && !sata_pmp_attached(ap)) {
1714 rc = 0;
1715 goto out_restart;
1716 }
1717
1718 if (!(hpriv->cap & HOST_CAP_CLO)) {
1719 rc = -EOPNOTSUPP;
1720 goto out_restart;
1721 }
1722
1723 /* perform CLO */
1724 tmp = readl(port_mmio + PORT_CMD);
1725 tmp |= PORT_CMD_CLO;
1726 writel(tmp, port_mmio + PORT_CMD);
1727
1728 rc = 0;
1729 tmp = ata_wait_register(port_mmio + PORT_CMD,
1730 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1731 if (tmp & PORT_CMD_CLO)
1732 rc = -EIO;
1733
1734 /* restart engine */
1735 out_restart:
1736 ahci_start_engine(ap);
1737 return rc;
1738 }
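
/*
 * For reference, on the CLO wait above: ata_wait_register() polls the
 * register until (value & mask) no longer equals the given value or the
 * timeout expires, returning the last value read.  So
 *
 *	tmp = ata_wait_register(port_mmio + PORT_CMD,
 *				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
 *
 * spins for up to 500ms waiting for the HBA to clear PORT_CMD_CLO; if
 * the bit is still set in the returned value, the CLO request timed out
 * and -EIO is returned.
 */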
1739
1740 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1741 struct ata_taskfile *tf, int is_cmd, u16 flags,
1742 unsigned long timeout_msec)
1743 {
1744 const u32 cmd_fis_len = 5; /* five dwords */
1745 struct ahci_port_priv *pp = ap->private_data;
1746 void __iomem *port_mmio = ahci_port_base(ap);
1747 u8 *fis = pp->cmd_tbl;
1748 u32 tmp;
1749
1750 /* prep the command */
1751 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1752 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1753
1754 /* issue & wait */
1755 writel(1, port_mmio + PORT_CMD_ISSUE);
1756
1757 if (timeout_msec) {
1758 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1759 1, timeout_msec);
1760 if (tmp & 0x1) {
1761 ahci_kick_engine(ap);
1762 return -EBUSY;
1763 }
1764 } else
1765 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1766
1767 return 0;
1768 }
1769
1770 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1771 int pmp, unsigned long deadline,
1772 int (*check_ready)(struct ata_link *link))
1773 {
1774 struct ata_port *ap = link->ap;
1775 struct ahci_host_priv *hpriv = ap->host->private_data;
1776 const char *reason = NULL;
1777 unsigned long now, msecs;
1778 struct ata_taskfile tf;
1779 int rc;
1780
1781 DPRINTK("ENTER\n");
1782
1783 /* prepare for SRST (AHCI-1.1 10.4.1) */
1784 rc = ahci_kick_engine(ap);
1785 if (rc && rc != -EOPNOTSUPP)
1786 ata_link_printk(link, KERN_WARNING,
1787 "failed to reset engine (errno=%d)\n", rc);
1788
1789 ata_tf_init(link->device, &tf);
1790
1791 	/* issue the first H2D Register FIS */
1792 msecs = 0;
1793 now = jiffies;
1794 	if (time_after(deadline, now))
1795 msecs = jiffies_to_msecs(deadline - now);
1796
1797 tf.ctl |= ATA_SRST;
1798 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1799 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1800 rc = -EIO;
1801 reason = "1st FIS failed";
1802 goto fail;
1803 }
1804
1805 /* spec says at least 5us, but be generous and sleep for 1ms */
1806 msleep(1);
1807
1808 	/* issue the second H2D Register FIS */
1809 tf.ctl &= ~ATA_SRST;
1810 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1811
1812 /* wait for link to become ready */
1813 rc = ata_wait_after_reset(link, deadline, check_ready);
1814 if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1815 /*
1816 * Workaround for cases where link online status can't
1817 * be trusted. Treat device readiness timeout as link
1818 * offline.
1819 */
1820 ata_link_printk(link, KERN_INFO,
1821 "device not ready, treating as offline\n");
1822 *class = ATA_DEV_NONE;
1823 } else if (rc) {
1824 /* link occupied, -ENODEV too is an error */
1825 reason = "device not ready";
1826 goto fail;
1827 } else
1828 *class = ahci_dev_classify(ap);
1829
1830 DPRINTK("EXIT, class=%u\n", *class);
1831 return 0;
1832
1833 fail:
1834 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1835 return rc;
1836 }
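
/*
 * For reference, the sequence implemented above follows the usual ATA
 * software reset protocol (AHCI-1.1 10.4.1):
 *
 *	1. kick the engine so command slot 0 is free for the reset FISes
 *	2. issue an H2D Register FIS with SRST set in the device control
 *	   field (polled, AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY)
 *	3. wait at least 5us (1ms here)
 *	4. issue a second H2D Register FIS with SRST cleared
 *	5. wait for the link to become ready and classify the device
 */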
1837
1838 static int ahci_check_ready(struct ata_link *link)
1839 {
1840 void __iomem *port_mmio = ahci_port_base(link->ap);
1841 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1842
1843 return ata_check_ready(status);
1844 }
1845
1846 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1847 unsigned long deadline)
1848 {
1849 int pmp = sata_srst_pmp(link);
1850
1851 DPRINTK("ENTER\n");
1852
1853 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1854 }
1855
1856 static int ahci_sb600_check_ready(struct ata_link *link)
1857 {
1858 void __iomem *port_mmio = ahci_port_base(link->ap);
1859 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1860 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1861
1862 /*
1863 	 * Due to a HW bug there is no need to check TFDATA if BAD PMP is
1864 	 * found; bailing out early saves the timeout delay.
1865 */
1866 if (irq_status & PORT_IRQ_BAD_PMP)
1867 return -EIO;
1868
1869 return ata_check_ready(status);
1870 }
1871
1872 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1873 unsigned long deadline)
1874 {
1875 struct ata_port *ap = link->ap;
1876 void __iomem *port_mmio = ahci_port_base(ap);
1877 int pmp = sata_srst_pmp(link);
1878 int rc;
1879 u32 irq_sts;
1880
1881 DPRINTK("ENTER\n");
1882
1883 rc = ahci_do_softreset(link, class, pmp, deadline,
1884 ahci_sb600_check_ready);
1885
1886 /*
1887 * Soft reset fails on some ATI chips with IPMS set when PMP
1888 * is enabled but SATA HDD/ODD is connected to SATA port,
1889 * do soft reset again to port 0.
1890 */
1891 if (rc == -EIO) {
1892 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1893 if (irq_sts & PORT_IRQ_BAD_PMP) {
1894 ata_link_printk(link, KERN_WARNING,
1895 "applying SB600 PMP SRST workaround "
1896 "and retrying\n");
1897 rc = ahci_do_softreset(link, class, 0, deadline,
1898 ahci_check_ready);
1899 }
1900 }
1901
1902 return rc;
1903 }
1904
1905 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1906 unsigned long deadline)
1907 {
1908 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1909 struct ata_port *ap = link->ap;
1910 struct ahci_port_priv *pp = ap->private_data;
1911 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1912 struct ata_taskfile tf;
1913 bool online;
1914 int rc;
1915
1916 DPRINTK("ENTER\n");
1917
1918 ahci_stop_engine(ap);
1919
1920 /* clear D2H reception area to properly wait for D2H FIS */
1921 ata_tf_init(link->device, &tf);
1922 tf.command = 0x80;
1923 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1924
1925 rc = sata_link_hardreset(link, timing, deadline, &online,
1926 ahci_check_ready);
1927
1928 ahci_start_engine(ap);
1929
1930 if (online)
1931 *class = ahci_dev_classify(ap);
1932
1933 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1934 return rc;
1935 }
1936
1937 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1938 unsigned long deadline)
1939 {
1940 struct ata_port *ap = link->ap;
1941 bool online;
1942 int rc;
1943
1944 DPRINTK("ENTER\n");
1945
1946 ahci_stop_engine(ap);
1947
1948 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1949 deadline, &online, NULL);
1950
1951 ahci_start_engine(ap);
1952
1953 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1954
1955 /* vt8251 doesn't clear BSY on signature FIS reception,
1956 * request follow-up softreset.
1957 */
1958 return online ? -EAGAIN : rc;
1959 }
1960
1961 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1962 unsigned long deadline)
1963 {
1964 struct ata_port *ap = link->ap;
1965 struct ahci_port_priv *pp = ap->private_data;
1966 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1967 struct ata_taskfile tf;
1968 bool online;
1969 int rc;
1970
1971 ahci_stop_engine(ap);
1972
1973 /* clear D2H reception area to properly wait for D2H FIS */
1974 ata_tf_init(link->device, &tf);
1975 tf.command = 0x80;
1976 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1977
1978 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1979 deadline, &online, NULL);
1980
1981 ahci_start_engine(ap);
1982
1983 /* The pseudo configuration device on SIMG4726 attached to
1984 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1985 * hardreset if no device is attached to the first downstream
1986 * port && the pseudo device locks up on SRST w/ PMP==0. To
1987 * work around this, wait for !BSY only briefly. If BSY isn't
1988 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1989 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1990 *
1991 	 * Wait for two seconds.  Devices attached to the downstream port
1992 * which can't process the following IDENTIFY after this will
1993 * have to be reset again. For most cases, this should
1994 * suffice while making probing snappish enough.
1995 */
1996 if (online) {
1997 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1998 ahci_check_ready);
1999 if (rc)
2000 ahci_kick_engine(ap);
2001 }
2002 return rc;
2003 }
2004
2005 static void ahci_postreset(struct ata_link *link, unsigned int *class)
2006 {
2007 struct ata_port *ap = link->ap;
2008 void __iomem *port_mmio = ahci_port_base(ap);
2009 u32 new_tmp, tmp;
2010
2011 ata_std_postreset(link, class);
2012
2013 /* Make sure port's ATAPI bit is set appropriately */
2014 new_tmp = tmp = readl(port_mmio + PORT_CMD);
2015 if (*class == ATA_DEV_ATAPI)
2016 new_tmp |= PORT_CMD_ATAPI;
2017 else
2018 new_tmp &= ~PORT_CMD_ATAPI;
2019 if (new_tmp != tmp) {
2020 writel(new_tmp, port_mmio + PORT_CMD);
2021 readl(port_mmio + PORT_CMD); /* flush */
2022 }
2023 }
2024
2025 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
2026 {
2027 struct scatterlist *sg;
2028 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
2029 unsigned int si;
2030
2031 VPRINTK("ENTER\n");
2032
2033 /*
2034 * Next, the S/G list.
2035 */
2036 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2037 dma_addr_t addr = sg_dma_address(sg);
2038 u32 sg_len = sg_dma_len(sg);
2039
2040 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
2041 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
2042 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
2043 }
2044
2045 return si;
2046 }
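
/*
 * For illustration (hypothetical segment): each PRDT entry built above
 * stores the byte count minus one, as the AHCI DBC field is zero based,
 * and splits the DMA address the same way as ahci_fill_cmd_slot().  A
 * 4KiB segment at bus address 0x80001000 would become:
 *
 *	addr       = 0x80001000
 *	addr_hi    = 0x00000000
 *	flags_size = 0x00000fff		(4096 - 1)
 */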
2047
2048 static void ahci_qc_prep(struct ata_queued_cmd *qc)
2049 {
2050 struct ata_port *ap = qc->ap;
2051 struct ahci_port_priv *pp = ap->private_data;
2052 int is_atapi = ata_is_atapi(qc->tf.protocol);
2053 void *cmd_tbl;
2054 u32 opts;
2055 const u32 cmd_fis_len = 5; /* five dwords */
2056 unsigned int n_elem;
2057
2058 /*
2059 * Fill in command table information. First, the header,
2060 * a SATA Register - Host to Device command FIS.
2061 */
2062 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2063
2064 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
2065 if (is_atapi) {
2066 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2067 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2068 }
2069
2070 n_elem = 0;
2071 if (qc->flags & ATA_QCFLAG_DMAMAP)
2072 n_elem = ahci_fill_sg(qc, cmd_tbl);
2073
2074 /*
2075 * Fill in command slot information.
2076 */
2077 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
2078 if (qc->tf.flags & ATA_TFLAG_WRITE)
2079 opts |= AHCI_CMD_WRITE;
2080 if (is_atapi)
2081 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2082
2083 ahci_fill_cmd_slot(pp, qc->tag, opts);
2084 }
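
/*
 * For illustration: "opts" above becomes DW0 of the command header.
 * The fields used here are:
 *
 *	bits  4:0	command FIS length in dwords (5)
 *	bit   5		ATAPI command (AHCI_CMD_ATAPI)
 *	bit   6		write (AHCI_CMD_WRITE)
 *	bit   7		prefetchable (AHCI_CMD_PREFETCH)
 *	bits 15:12	port multiplier port
 *	bits 31:16	PRDT length (number of S/G entries)
 */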
2085
2086 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
2087 {
2088 struct ahci_host_priv *hpriv = ap->host->private_data;
2089 struct ahci_port_priv *pp = ap->private_data;
2090 struct ata_eh_info *host_ehi = &ap->link.eh_info;
2091 struct ata_link *link = NULL;
2092 struct ata_queued_cmd *active_qc;
2093 struct ata_eh_info *active_ehi;
2094 u32 serror;
2095
2096 /* determine active link */
2097 ata_for_each_link(link, ap, EDGE)
2098 if (ata_link_active(link))
2099 break;
2100 if (!link)
2101 link = &ap->link;
2102
2103 active_qc = ata_qc_from_tag(ap, link->active_tag);
2104 active_ehi = &link->eh_info;
2105
2106 /* record irq stat */
2107 ata_ehi_clear_desc(host_ehi);
2108 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
2109
2110 /* AHCI needs SError cleared; otherwise, it might lock up */
2111 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
2112 ahci_scr_write(&ap->link, SCR_ERROR, serror);
2113 host_ehi->serror |= serror;
2114
2115 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
2116 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
2117 irq_stat &= ~PORT_IRQ_IF_ERR;
2118
2119 if (irq_stat & PORT_IRQ_TF_ERR) {
2120 /* If qc is active, charge it; otherwise, the active
2121 * link. There's no active qc on NCQ errors. It will
2122 * be determined by EH by reading log page 10h.
2123 */
2124 if (active_qc)
2125 active_qc->err_mask |= AC_ERR_DEV;
2126 else
2127 active_ehi->err_mask |= AC_ERR_DEV;
2128
2129 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2130 host_ehi->serror &= ~SERR_INTERNAL;
2131 }
2132
2133 if (irq_stat & PORT_IRQ_UNK_FIS) {
2134 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2135
2136 active_ehi->err_mask |= AC_ERR_HSM;
2137 active_ehi->action |= ATA_EH_RESET;
2138 ata_ehi_push_desc(active_ehi,
2139 "unknown FIS %08x %08x %08x %08x" ,
2140 unk[0], unk[1], unk[2], unk[3]);
2141 }
2142
2143 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2144 active_ehi->err_mask |= AC_ERR_HSM;
2145 active_ehi->action |= ATA_EH_RESET;
2146 ata_ehi_push_desc(active_ehi, "incorrect PMP");
2147 }
2148
2149 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2150 host_ehi->err_mask |= AC_ERR_HOST_BUS;
2151 host_ehi->action |= ATA_EH_RESET;
2152 ata_ehi_push_desc(host_ehi, "host bus error");
2153 }
2154
2155 if (irq_stat & PORT_IRQ_IF_ERR) {
2156 host_ehi->err_mask |= AC_ERR_ATA_BUS;
2157 host_ehi->action |= ATA_EH_RESET;
2158 ata_ehi_push_desc(host_ehi, "interface fatal error");
2159 }
2160
2161 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2162 ata_ehi_hotplugged(host_ehi);
2163 ata_ehi_push_desc(host_ehi, "%s",
2164 irq_stat & PORT_IRQ_CONNECT ?
2165 "connection status changed" : "PHY RDY changed");
2166 }
2167
2168 /* okay, let's hand over to EH */
2169
2170 if (irq_stat & PORT_IRQ_FREEZE)
2171 ata_port_freeze(ap);
2172 else
2173 ata_port_abort(ap);
2174 }
2175
2176 static void ahci_port_intr(struct ata_port *ap)
2177 {
2178 void __iomem *port_mmio = ahci_port_base(ap);
2179 struct ata_eh_info *ehi = &ap->link.eh_info;
2180 struct ahci_port_priv *pp = ap->private_data;
2181 struct ahci_host_priv *hpriv = ap->host->private_data;
2182 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2183 u32 status, qc_active;
2184 int rc;
2185
2186 status = readl(port_mmio + PORT_IRQ_STAT);
2187 writel(status, port_mmio + PORT_IRQ_STAT);
2188
2189 /* ignore BAD_PMP while resetting */
2190 if (unlikely(resetting))
2191 status &= ~PORT_IRQ_BAD_PMP;
2192
2193 	/* If we are getting PhyRdy, this is
2194 	 * just a power state change; clear the
2195 	 * interrupt, plus the PhyRdy/Comm Wake
2196 	 * bits in SError.
2197 	 */
2198 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2199 (status & PORT_IRQ_PHYRDY)) {
2200 status &= ~PORT_IRQ_PHYRDY;
2201 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2202 }
2203
2204 if (unlikely(status & PORT_IRQ_ERROR)) {
2205 ahci_error_intr(ap, status);
2206 return;
2207 }
2208
2209 if (status & PORT_IRQ_SDB_FIS) {
2210 /* If SNotification is available, leave notification
2211 * handling to sata_async_notification(). If not,
2212 * emulate it by snooping SDB FIS RX area.
2213 *
2214 * Snooping FIS RX area is probably cheaper than
2215 		 * poking SNotification but some controllers which
2216 		 * implement SNotification, ICH9 for example, don't
2217 		 * store AN SDB FIS into the receive area.
2218 */
2219 if (hpriv->cap & HOST_CAP_SNTF)
2220 sata_async_notification(ap);
2221 else {
2222 /* If the 'N' bit in word 0 of the FIS is set,
2223 * we just received asynchronous notification.
2224 * Tell libata about it.
2225 */
2226 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2227 u32 f0 = le32_to_cpu(f[0]);
2228
2229 if (f0 & (1 << 15))
2230 sata_async_notification(ap);
2231 }
2232 }
2233
2234 /* pp->active_link is valid iff any command is in flight */
2235 if (ap->qc_active && pp->active_link->sactive)
2236 qc_active = readl(port_mmio + PORT_SCR_ACT);
2237 else
2238 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2239
2240 rc = ata_qc_complete_multiple(ap, qc_active);
2241
2242 /* while resetting, invalid completions are expected */
2243 if (unlikely(rc < 0 && !resetting)) {
2244 ehi->err_mask |= AC_ERR_HSM;
2245 ehi->action |= ATA_EH_RESET;
2246 ata_port_freeze(ap);
2247 }
2248 }
2249
2250 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2251 {
2252 struct ata_host *host = dev_instance;
2253 struct ahci_host_priv *hpriv;
2254 unsigned int i, handled = 0;
2255 void __iomem *mmio;
2256 u32 irq_stat, irq_masked;
2257
2258 VPRINTK("ENTER\n");
2259
2260 hpriv = host->private_data;
2261 mmio = host->iomap[AHCI_PCI_BAR];
2262
2263 /* sigh. 0xffffffff is a valid return from h/w */
2264 irq_stat = readl(mmio + HOST_IRQ_STAT);
2265 if (!irq_stat)
2266 return IRQ_NONE;
2267
2268 irq_masked = irq_stat & hpriv->port_map;
2269
2270 spin_lock(&host->lock);
2271
2272 for (i = 0; i < host->n_ports; i++) {
2273 struct ata_port *ap;
2274
2275 if (!(irq_masked & (1 << i)))
2276 continue;
2277
2278 ap = host->ports[i];
2279 if (ap) {
2280 ahci_port_intr(ap);
2281 VPRINTK("port %u\n", i);
2282 } else {
2283 VPRINTK("port %u (no irq)\n", i);
2284 if (ata_ratelimit())
2285 dev_printk(KERN_WARNING, host->dev,
2286 "interrupt on disabled port %u\n", i);
2287 }
2288
2289 handled = 1;
2290 }
2291
2292 	/* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
2293 * it should be cleared after all the port events are cleared;
2294 * otherwise, it will raise a spurious interrupt after each
2295 * valid one. Please read section 10.6.2 of ahci 1.1 for more
2296 * information.
2297 *
2298 * Also, use the unmasked value to clear interrupt as spurious
2299 * pending event on a dummy port might cause screaming IRQ.
2300 */
2301 writel(irq_stat, mmio + HOST_IRQ_STAT);
2302
2303 spin_unlock(&host->lock);
2304
2305 VPRINTK("EXIT\n");
2306
2307 return IRQ_RETVAL(handled);
2308 }
2309
2310 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2311 {
2312 struct ata_port *ap = qc->ap;
2313 void __iomem *port_mmio = ahci_port_base(ap);
2314 struct ahci_port_priv *pp = ap->private_data;
2315
2316 /* Keep track of the currently active link. It will be used
2317 * in completion path to determine whether NCQ phase is in
2318 * progress.
2319 */
2320 pp->active_link = qc->dev->link;
2321
2322 if (qc->tf.protocol == ATA_PROT_NCQ)
2323 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2324 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2325
2326 ahci_sw_activity(qc->dev->link);
2327
2328 return 0;
2329 }
2330
2331 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2332 {
2333 struct ahci_port_priv *pp = qc->ap->private_data;
2334 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2335
2336 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2337 return true;
2338 }
2339
2340 static void ahci_freeze(struct ata_port *ap)
2341 {
2342 void __iomem *port_mmio = ahci_port_base(ap);
2343
2344 /* turn IRQ off */
2345 writel(0, port_mmio + PORT_IRQ_MASK);
2346 }
2347
2348 static void ahci_thaw(struct ata_port *ap)
2349 {
2350 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2351 void __iomem *port_mmio = ahci_port_base(ap);
2352 u32 tmp;
2353 struct ahci_port_priv *pp = ap->private_data;
2354
2355 /* clear IRQ */
2356 tmp = readl(port_mmio + PORT_IRQ_STAT);
2357 writel(tmp, port_mmio + PORT_IRQ_STAT);
2358 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2359
2360 /* turn IRQ back on */
2361 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2362 }
2363
2364 static void ahci_error_handler(struct ata_port *ap)
2365 {
2366 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2367 /* restart engine */
2368 ahci_stop_engine(ap);
2369 ahci_start_engine(ap);
2370 }
2371
2372 sata_pmp_error_handler(ap);
2373 }
2374
2375 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2376 {
2377 struct ata_port *ap = qc->ap;
2378
2379 /* make DMA engine forget about the failed command */
2380 if (qc->flags & ATA_QCFLAG_FAILED)
2381 ahci_kick_engine(ap);
2382 }
2383
2384 static void ahci_pmp_attach(struct ata_port *ap)
2385 {
2386 void __iomem *port_mmio = ahci_port_base(ap);
2387 struct ahci_port_priv *pp = ap->private_data;
2388 u32 cmd;
2389
2390 cmd = readl(port_mmio + PORT_CMD);
2391 cmd |= PORT_CMD_PMP;
2392 writel(cmd, port_mmio + PORT_CMD);
2393
2394 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2395 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2396 }
2397
2398 static void ahci_pmp_detach(struct ata_port *ap)
2399 {
2400 void __iomem *port_mmio = ahci_port_base(ap);
2401 struct ahci_port_priv *pp = ap->private_data;
2402 u32 cmd;
2403
2404 cmd = readl(port_mmio + PORT_CMD);
2405 cmd &= ~PORT_CMD_PMP;
2406 writel(cmd, port_mmio + PORT_CMD);
2407
2408 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2409 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2410 }
2411
2412 static int ahci_port_resume(struct ata_port *ap)
2413 {
2414 ahci_power_up(ap);
2415 ahci_start_port(ap);
2416
2417 if (sata_pmp_attached(ap))
2418 ahci_pmp_attach(ap);
2419 else
2420 ahci_pmp_detach(ap);
2421
2422 return 0;
2423 }
2424
2425 #ifdef CONFIG_PM
2426 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2427 {
2428 const char *emsg = NULL;
2429 int rc;
2430
2431 rc = ahci_deinit_port(ap, &emsg);
2432 if (rc == 0)
2433 ahci_power_down(ap);
2434 else {
2435 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2436 ahci_start_port(ap);
2437 }
2438
2439 return rc;
2440 }
2441
2442 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2443 {
2444 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2445 struct ahci_host_priv *hpriv = host->private_data;
2446 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2447 u32 ctl;
2448
2449 if (mesg.event & PM_EVENT_SUSPEND &&
2450 hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
2451 dev_printk(KERN_ERR, &pdev->dev,
2452 "BIOS update required for suspend/resume\n");
2453 return -EIO;
2454 }
2455
2456 if (mesg.event & PM_EVENT_SLEEP) {
2457 /* AHCI spec rev1.1 section 8.3.3:
2458 * Software must disable interrupts prior to requesting a
2459 * transition of the HBA to D3 state.
2460 */
2461 ctl = readl(mmio + HOST_CTL);
2462 ctl &= ~HOST_IRQ_EN;
2463 writel(ctl, mmio + HOST_CTL);
2464 readl(mmio + HOST_CTL); /* flush */
2465 }
2466
2467 return ata_pci_device_suspend(pdev, mesg);
2468 }
2469
2470 static int ahci_pci_device_resume(struct pci_dev *pdev)
2471 {
2472 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2473 int rc;
2474
2475 rc = ata_pci_device_do_resume(pdev);
2476 if (rc)
2477 return rc;
2478
2479 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2480 rc = ahci_reset_controller(host);
2481 if (rc)
2482 return rc;
2483
2484 ahci_init_controller(host);
2485 }
2486
2487 ata_host_resume(host);
2488
2489 return 0;
2490 }
2491 #endif
2492
2493 static int ahci_port_start(struct ata_port *ap)
2494 {
2495 struct device *dev = ap->host->dev;
2496 struct ahci_port_priv *pp;
2497 void *mem;
2498 dma_addr_t mem_dma;
2499
2500 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2501 if (!pp)
2502 return -ENOMEM;
2503
2504 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2505 GFP_KERNEL);
2506 if (!mem)
2507 return -ENOMEM;
2508 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2509
2510 /*
2511 * First item in chunk of DMA memory: 32-slot command table,
2512 * 32 bytes each in size
2513 */
2514 pp->cmd_slot = mem;
2515 pp->cmd_slot_dma = mem_dma;
2516
2517 mem += AHCI_CMD_SLOT_SZ;
2518 mem_dma += AHCI_CMD_SLOT_SZ;
2519
2520 /*
2521 * Second item: Received-FIS area
2522 */
2523 pp->rx_fis = mem;
2524 pp->rx_fis_dma = mem_dma;
2525
2526 mem += AHCI_RX_FIS_SZ;
2527 mem_dma += AHCI_RX_FIS_SZ;
2528
2529 /*
2530 * Third item: data area for storing a single command
2531 * and its scatter-gather table
2532 */
2533 pp->cmd_tbl = mem;
2534 pp->cmd_tbl_dma = mem_dma;
2535
2536 /*
2537 * Save off initial list of interrupts to be enabled.
2538 * This could be changed later
2539 */
2540 pp->intr_mask = DEF_PORT_IRQ;
2541
2542 ap->private_data = pp;
2543
2544 /* engage engines, captain */
2545 return ahci_port_resume(ap);
2546 }
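
/*
 * For illustration, the layout of the per-port DMA block allocated
 * above (sizes follow from the enums at the top of this file):
 *
 *	offset 0x000	command list, 32 slots x 32 bytes (AHCI_CMD_SLOT_SZ)
 *	offset 0x400	received FIS area, 256 bytes (AHCI_RX_FIS_SZ)
 *	offset 0x500	command tables, AHCI_MAX_CMDS x AHCI_CMD_TBL_SZ
 *
 * AHCI_PORT_PRIV_DMA_SZ bytes in total.
 */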
2547
2548 static void ahci_port_stop(struct ata_port *ap)
2549 {
2550 const char *emsg = NULL;
2551 int rc;
2552
2553 /* de-initialize port */
2554 rc = ahci_deinit_port(ap, &emsg);
2555 if (rc)
2556 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2557 }
2558
2559 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2560 {
2561 int rc;
2562
2563 if (using_dac &&
2564 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2565 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2566 if (rc) {
2567 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2568 if (rc) {
2569 dev_printk(KERN_ERR, &pdev->dev,
2570 "64-bit DMA enable failed\n");
2571 return rc;
2572 }
2573 }
2574 } else {
2575 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2576 if (rc) {
2577 dev_printk(KERN_ERR, &pdev->dev,
2578 "32-bit DMA enable failed\n");
2579 return rc;
2580 }
2581 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2582 if (rc) {
2583 dev_printk(KERN_ERR, &pdev->dev,
2584 "32-bit consistent DMA enable failed\n");
2585 return rc;
2586 }
2587 }
2588 return 0;
2589 }
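
/*
 * For reference: when the controller advertises 64-bit addressing
 * (using_dac), the streaming mask is set to 64 bits and the consistent
 * mask falls back from 64 to 32 bits if needed; otherwise both masks
 * are set to 32 bits.  A caller simply passes the HOST_CAP_64 bit:
 *
 *	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
 *
 * which is how ahci_init_one() uses it further below.
 */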
2590
2591 static void ahci_print_info(struct ata_host *host)
2592 {
2593 struct ahci_host_priv *hpriv = host->private_data;
2594 struct pci_dev *pdev = to_pci_dev(host->dev);
2595 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2596 u32 vers, cap, cap2, impl, speed;
2597 const char *speed_s;
2598 u16 cc;
2599 const char *scc_s;
2600
2601 vers = readl(mmio + HOST_VERSION);
2602 cap = hpriv->cap;
2603 cap2 = hpriv->cap2;
2604 impl = hpriv->port_map;
2605
2606 speed = (cap >> 20) & 0xf;
2607 if (speed == 1)
2608 speed_s = "1.5";
2609 else if (speed == 2)
2610 speed_s = "3";
2611 else if (speed == 3)
2612 speed_s = "6";
2613 else
2614 speed_s = "?";
2615
2616 pci_read_config_word(pdev, 0x0a, &cc);
2617 if (cc == PCI_CLASS_STORAGE_IDE)
2618 scc_s = "IDE";
2619 else if (cc == PCI_CLASS_STORAGE_SATA)
2620 scc_s = "SATA";
2621 else if (cc == PCI_CLASS_STORAGE_RAID)
2622 scc_s = "RAID";
2623 else
2624 scc_s = "unknown";
2625
2626 dev_printk(KERN_INFO, &pdev->dev,
2627 "AHCI %02x%02x.%02x%02x "
2628 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2629 ,
2630
2631 (vers >> 24) & 0xff,
2632 (vers >> 16) & 0xff,
2633 (vers >> 8) & 0xff,
2634 vers & 0xff,
2635
2636 ((cap >> 8) & 0x1f) + 1,
2637 (cap & 0x1f) + 1,
2638 speed_s,
2639 impl,
2640 scc_s);
2641
2642 dev_printk(KERN_INFO, &pdev->dev,
2643 "flags: "
2644 "%s%s%s%s%s%s%s"
2645 "%s%s%s%s%s%s%s"
2646 "%s%s%s%s%s%s\n"
2647 ,
2648
2649 cap & HOST_CAP_64 ? "64bit " : "",
2650 cap & HOST_CAP_NCQ ? "ncq " : "",
2651 cap & HOST_CAP_SNTF ? "sntf " : "",
2652 cap & HOST_CAP_MPS ? "ilck " : "",
2653 cap & HOST_CAP_SSS ? "stag " : "",
2654 cap & HOST_CAP_ALPM ? "pm " : "",
2655 cap & HOST_CAP_LED ? "led " : "",
2656 cap & HOST_CAP_CLO ? "clo " : "",
2657 cap & HOST_CAP_ONLY ? "only " : "",
2658 cap & HOST_CAP_PMP ? "pmp " : "",
2659 cap & HOST_CAP_FBS ? "fbs " : "",
2660 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2661 cap & HOST_CAP_SSC ? "slum " : "",
2662 cap & HOST_CAP_PART ? "part " : "",
2663 cap & HOST_CAP_CCC ? "ccc " : "",
2664 cap & HOST_CAP_EMS ? "ems " : "",
2665 cap & HOST_CAP_SXS ? "sxs " : "",
2666 cap2 & HOST_CAP2_APST ? "apst " : "",
2667 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2668 cap2 & HOST_CAP2_BOH ? "boh " : ""
2669 );
2670 }
2671
2672 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2673 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2674 * support PMP and the 4726 either directly exports the device
2675 * attached to the first downstream port or acts as a hardware storage
2676 * controller and emulates a single ATA device (can be RAID 0/1 or some
2677 * other configuration).
2678 *
2679 * When there's no device attached to the first downstream port of the
2680 * 4726, "Config Disk" appears, which is a pseudo ATA device to
2681 * configure the 4726. However, ATA emulation of the device is very
2682 * lame.  It doesn't send a signature D2H Reg FIS after the initial
2683 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2684 *
2685 * The following function works around the problem by always using
2686 * hardreset on the port and not depending on receiving signature FIS
2687 * afterward. If signature FIS isn't received soon, ATA class is
2688 * assumed without follow-up softreset.
2689 */
2690 static void ahci_p5wdh_workaround(struct ata_host *host)
2691 {
2692 static struct dmi_system_id sysids[] = {
2693 {
2694 .ident = "P5W DH Deluxe",
2695 .matches = {
2696 DMI_MATCH(DMI_SYS_VENDOR,
2697 "ASUSTEK COMPUTER INC"),
2698 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2699 },
2700 },
2701 { }
2702 };
2703 struct pci_dev *pdev = to_pci_dev(host->dev);
2704
2705 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2706 dmi_check_system(sysids)) {
2707 struct ata_port *ap = host->ports[1];
2708
2709 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2710 "Deluxe on-board SIMG4726 workaround\n");
2711
2712 ap->ops = &ahci_p5wdh_ops;
2713 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2714 }
2715 }
2716
2717 /* only some SB600 ahci controllers can do 64bit DMA */
2718 static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
2719 {
2720 static const struct dmi_system_id sysids[] = {
2721 /*
2722 * The oldest version known to be broken is 0901 and
2723 * working is 1501 which was released on 2007-10-26.
2724 * Enable 64bit DMA on 1501 and anything newer.
2725 *
2726 * Please read bko#9412 for more info.
2727 */
2728 {
2729 .ident = "ASUS M2A-VM",
2730 .matches = {
2731 DMI_MATCH(DMI_BOARD_VENDOR,
2732 "ASUSTeK Computer INC."),
2733 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2734 },
2735 .driver_data = "20071026", /* yyyymmdd */
2736 },
2737 /*
2738 * All BIOS versions for the MSI K9A2 Platinum (MS-7376)
2739 * support 64bit DMA.
2740 *
2741 * BIOS versions earlier than 1.5 had the Manufacturer DMI
2742 * fields as "MICRO-STAR INTERANTIONAL CO.,LTD".
2743 * This spelling mistake was fixed in BIOS version 1.5, so
2744 * 1.5 and later have the Manufacturer as
2745 * "MICRO-STAR INTERNATIONAL CO.,LTD".
2746 * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER".
2747 *
2748 * BIOS versions earlier than 1.9 had a Board Product Name
2749 * DMI field of "MS-7376". This was changed to be
2750 * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still
2751 * match on DMI_BOARD_NAME of "MS-7376".
2752 */
2753 {
2754 .ident = "MSI K9A2 Platinum",
2755 .matches = {
2756 DMI_MATCH(DMI_BOARD_VENDOR,
2757 "MICRO-STAR INTER"),
2758 DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
2759 },
2760 },
2761 { }
2762 };
2763 const struct dmi_system_id *match;
2764 int year, month, date;
2765 char buf[9];
2766
2767 match = dmi_first_match(sysids);
2768 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
2769 !match)
2770 return false;
2771
2772 if (!match->driver_data)
2773 goto enable_64bit;
2774
2775 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2776 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2777
2778 if (strcmp(buf, match->driver_data) >= 0)
2779 goto enable_64bit;
2780 else {
2781 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
2782 "forcing 32bit DMA, update BIOS\n", match->ident);
2783 return false;
2784 }
2785
2786 enable_64bit:
2787 dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
2788 match->ident);
2789 return true;
2790 }
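
/*
 * For illustration (hypothetical BIOS date): because the date is
 * rebuilt as a fixed-width "yyyymmdd" string, a plain strcmp() gives
 * chronological ordering against the cutoff stored in driver_data:
 *
 *	buf = "20080315", cutoff = "20071026"
 *	strcmp(buf, cutoff) >= 0  ->  BIOS new enough, enable 64bit DMA
 */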
2791
2792 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
2793 {
2794 static const struct dmi_system_id broken_systems[] = {
2795 {
2796 .ident = "HP Compaq nx6310",
2797 .matches = {
2798 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2799 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
2800 },
2801 /* PCI slot number of the controller */
2802 .driver_data = (void *)0x1FUL,
2803 },
2804 {
2805 .ident = "HP Compaq 6720s",
2806 .matches = {
2807 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2808 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
2809 },
2810 /* PCI slot number of the controller */
2811 .driver_data = (void *)0x1FUL,
2812 },
2813
2814 { } /* terminate list */
2815 };
2816 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
2817
2818 if (dmi) {
2819 unsigned long slot = (unsigned long)dmi->driver_data;
2820 /* apply the quirk only to on-board controllers */
2821 return slot == PCI_SLOT(pdev->devfn);
2822 }
2823
2824 return false;
2825 }
2826
2827 static bool ahci_broken_suspend(struct pci_dev *pdev)
2828 {
2829 static const struct dmi_system_id sysids[] = {
2830 /*
2831 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
2832 * to the harddisk doesn't become online after
2833 * resuming from STR. Warn and fail suspend.
2834 */
2835 {
2836 .ident = "dv4",
2837 .matches = {
2838 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2839 DMI_MATCH(DMI_PRODUCT_NAME,
2840 "HP Pavilion dv4 Notebook PC"),
2841 },
2842 .driver_data = "F.30", /* cutoff BIOS version */
2843 },
2844 {
2845 .ident = "dv5",
2846 .matches = {
2847 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2848 DMI_MATCH(DMI_PRODUCT_NAME,
2849 "HP Pavilion dv5 Notebook PC"),
2850 },
2851 .driver_data = "F.16", /* cutoff BIOS version */
2852 },
2853 {
2854 .ident = "dv6",
2855 .matches = {
2856 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2857 DMI_MATCH(DMI_PRODUCT_NAME,
2858 "HP Pavilion dv6 Notebook PC"),
2859 },
2860 .driver_data = "F.21", /* cutoff BIOS version */
2861 },
2862 {
2863 .ident = "HDX18",
2864 .matches = {
2865 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2866 DMI_MATCH(DMI_PRODUCT_NAME,
2867 "HP HDX18 Notebook PC"),
2868 },
2869 .driver_data = "F.23", /* cutoff BIOS version */
2870 },
2871 { } /* terminate list */
2872 };
2873 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2874 const char *ver;
2875
2876 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
2877 return false;
2878
2879 ver = dmi_get_system_info(DMI_BIOS_VERSION);
2880
2881 return !ver || strcmp(ver, dmi->driver_data) < 0;
2882 }
2883
2884 static bool ahci_broken_online(struct pci_dev *pdev)
2885 {
2886 #define ENCODE_BUSDEVFN(bus, slot, func) \
2887 (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
2888 static const struct dmi_system_id sysids[] = {
2889 /*
2890 * There are several gigabyte boards which use
2891 * SIMG5723s configured as hardware RAID. Certain
2892 * 5723 firmware revisions shipped there keep the link
2893 * online but fail to answer properly to SRST or
2894 * IDENTIFY when no device is attached downstream
2895 * causing libata to retry quite a few times leading
2896 * to excessive detection delay.
2897 *
2898 * As these firmwares respond to the second reset try
2899 * with invalid device signature, considering unknown
2900 * sig as offline works around the problem acceptably.
2901 */
2902 {
2903 .ident = "EP45-DQ6",
2904 .matches = {
2905 DMI_MATCH(DMI_BOARD_VENDOR,
2906 "Gigabyte Technology Co., Ltd."),
2907 DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
2908 },
2909 .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
2910 },
2911 {
2912 .ident = "EP45-DS5",
2913 .matches = {
2914 DMI_MATCH(DMI_BOARD_VENDOR,
2915 "Gigabyte Technology Co., Ltd."),
2916 DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
2917 },
2918 .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
2919 },
2920 { } /* terminate list */
2921 };
2922 #undef ENCODE_BUSDEVFN
2923 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2924 unsigned int val;
2925
2926 if (!dmi)
2927 return false;
2928
2929 val = (unsigned long)dmi->driver_data;
2930
2931 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
2932 }
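
/*
 * For illustration: ENCODE_BUSDEVFN() packs the PCI location into one
 * driver_data value, bus number in bits 15:8 and devfn in bits 7:0.
 * The EP45-DS5 entry, ENCODE_BUSDEVFN(0x03, 0x00, 0), yields val =
 * 0x0300 and therefore matches a controller at PCI address 03:00.0.
 */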
2933
2934 #ifdef CONFIG_ATA_ACPI
2935 static void ahci_gtf_filter_workaround(struct ata_host *host)
2936 {
2937 static const struct dmi_system_id sysids[] = {
2938 /*
2939 * Aspire 3810T issues a bunch of SATA enable commands
2940 * via _GTF including an invalid one and one which is
2941 * rejected by the device. Among the successful ones
2942 * is FPDMA non-zero offset enable which when enabled
2943 * only on the drive side leads to NCQ command
2944 * failures. Filter it out.
2945 */
2946 {
2947 .ident = "Aspire 3810T",
2948 .matches = {
2949 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2950 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
2951 },
2952 .driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
2953 },
2954 { }
2955 };
2956 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2957 unsigned int filter;
2958 int i;
2959
2960 if (!dmi)
2961 return;
2962
2963 filter = (unsigned long)dmi->driver_data;
2964 dev_printk(KERN_INFO, host->dev,
2965 "applying extra ACPI _GTF filter 0x%x for %s\n",
2966 filter, dmi->ident);
2967
2968 for (i = 0; i < host->n_ports; i++) {
2969 struct ata_port *ap = host->ports[i];
2970 struct ata_link *link;
2971 struct ata_device *dev;
2972
2973 ata_for_each_link(link, ap, EDGE)
2974 ata_for_each_dev(dev, link, ALL)
2975 dev->gtf_filter |= filter;
2976 }
2977 }
2978 #else
2979 static inline void ahci_gtf_filter_workaround(struct ata_host *host)
2980 {}
2981 #endif
2982
2983 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2984 {
2985 static int printed_version;
2986 unsigned int board_id = ent->driver_data;
2987 struct ata_port_info pi = ahci_port_info[board_id];
2988 const struct ata_port_info *ppi[] = { &pi, NULL };
2989 struct device *dev = &pdev->dev;
2990 struct ahci_host_priv *hpriv;
2991 struct ata_host *host;
2992 int n_ports, i, rc;
2993
2994 VPRINTK("ENTER\n");
2995
2996 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2997
2998 if (!printed_version++)
2999 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
3000
3001 	/* The AHCI driver can only drive the SATA ports; the PATA driver
3002 	   can drive them all, so if both drivers are selected make sure
3003 	   AHCI stays out of the way */
3004 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
3005 return -ENODEV;
3006
3007 /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
3008 * At the moment, we can only use the AHCI mode. Let the users know
3009 * that for SAS drives they're out of luck.
3010 */
3011 if (pdev->vendor == PCI_VENDOR_ID_PROMISE)
3012 dev_printk(KERN_INFO, &pdev->dev, "PDC42819 "
3013 "can only drive SATA devices with this driver\n");
3014
3015 /* acquire resources */
3016 rc = pcim_enable_device(pdev);
3017 if (rc)
3018 return rc;
3019
3020 /* AHCI controllers often implement SFF compatible interface.
3021 * Grab all PCI BARs just in case.
3022 */
3023 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
3024 if (rc == -EBUSY)
3025 pcim_pin_device(pdev);
3026 if (rc)
3027 return rc;
3028
3029 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3030 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
3031 u8 map;
3032
3033 /* ICH6s share the same PCI ID for both piix and ahci
3034 * modes. Enabling ahci mode while MAP indicates
3035 * combined mode is a bad idea. Yield to ata_piix.
3036 */
3037 pci_read_config_byte(pdev, ICH_MAP, &map);
3038 if (map & 0x3) {
3039 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
3040 "combined mode, can't enable AHCI mode\n");
3041 return -ENODEV;
3042 }
3043 }
3044
3045 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
3046 if (!hpriv)
3047 return -ENOMEM;
3048 hpriv->flags |= (unsigned long)pi.private_data;
3049
3050 /* MCP65 revision A1 and A2 can't do MSI */
3051 if (board_id == board_ahci_mcp65 &&
3052 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
3053 hpriv->flags |= AHCI_HFLAG_NO_MSI;
3054
3055 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
3056 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
3057 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
3058
3059 /* only some SB600s can do 64bit DMA */
3060 if (ahci_sb600_enable_64bit(pdev))
3061 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
3062
3063 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
3064 pci_intx(pdev, 1);
3065
3066 /* save initial config */
3067 ahci_save_initial_config(pdev, hpriv);
3068
3069 /* prepare host */
3070 if (hpriv->cap & HOST_CAP_NCQ)
3071 pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
3072
3073 if (hpriv->cap & HOST_CAP_PMP)
3074 pi.flags |= ATA_FLAG_PMP;
3075
3076 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
3077 u8 messages;
3078 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
3079 u32 em_loc = readl(mmio + HOST_EM_LOC);
3080 u32 em_ctl = readl(mmio + HOST_EM_CTL);
3081
3082 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
3083
3084 /* we only support LED message type right now */
3085 if ((messages & 0x01) && (ahci_em_messages == 1)) {
3086 /* store em_loc */
3087 hpriv->em_loc = ((em_loc >> 16) * 4);
3088 pi.flags |= ATA_FLAG_EM;
3089 if (!(em_ctl & EM_CTL_ALHD))
3090 pi.flags |= ATA_FLAG_SW_ACTIVITY;
3091 }
3092 }
3093
3094 if (ahci_broken_system_poweroff(pdev)) {
3095 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
3096 dev_info(&pdev->dev,
3097 "quirky BIOS, skipping spindown on poweroff\n");
3098 }
3099
3100 if (ahci_broken_suspend(pdev)) {
3101 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
3102 dev_printk(KERN_WARNING, &pdev->dev,
3103 "BIOS update required for suspend/resume\n");
3104 }
3105
3106 if (ahci_broken_online(pdev)) {
3107 hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
3108 dev_info(&pdev->dev,
3109 "online status unreliable, applying workaround\n");
3110 }
3111
3112 	/* CAP.NP sometimes indicates the index of the last enabled
3113 * port, at other times, that of the last possible port, so
3114 * determining the maximum port number requires looking at
3115 * both CAP.NP and port_map.
3116 */
3117 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
3118
3119 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3120 if (!host)
3121 return -ENOMEM;
3122 host->iomap = pcim_iomap_table(pdev);
3123 host->private_data = hpriv;
3124
3125 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
3126 host->flags |= ATA_HOST_PARALLEL_SCAN;
3127 else
3128 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
3129
3130 if (pi.flags & ATA_FLAG_EM)
3131 ahci_reset_em(host);
3132
3133 for (i = 0; i < host->n_ports; i++) {
3134 struct ata_port *ap = host->ports[i];
3135
3136 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
3137 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
3138 0x100 + ap->port_no * 0x80, "port");
3139
3140 /* set initial link pm policy */
3141 ap->pm_policy = NOT_AVAILABLE;
3142
3143 /* set enclosure management message type */
3144 if (ap->flags & ATA_FLAG_EM)
3145 ap->em_message_type = ahci_em_messages;
3146
3147
3148 /* disabled/not-implemented port */
3149 if (!(hpriv->port_map & (1 << i)))
3150 ap->ops = &ata_dummy_port_ops;
3151 }
3152
3153 /* apply workaround for ASUS P5W DH Deluxe mainboard */
3154 ahci_p5wdh_workaround(host);
3155
3156 /* apply gtf filter quirk */
3157 ahci_gtf_filter_workaround(host);
3158
3159 /* initialize adapter */
3160 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3161 if (rc)
3162 return rc;
3163
3164 rc = ahci_reset_controller(host);
3165 if (rc)
3166 return rc;
3167
3168 ahci_init_controller(host);
3169 ahci_print_info(host);
3170
3171 pci_set_master(pdev);
3172 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
3173 &ahci_sht);
3174 }
3175
3176 static int __init ahci_init(void)
3177 {
3178 return pci_register_driver(&ahci_pci_driver);
3179 }
3180
3181 static void __exit ahci_exit(void)
3182 {
3183 pci_unregister_driver(&ahci_pci_driver);
3184 }
3185
3186
3187 MODULE_AUTHOR("Jeff Garzik");
3188 MODULE_DESCRIPTION("AHCI SATA low-level driver");
3189 MODULE_LICENSE("GPL");
3190 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
3191 MODULE_VERSION(DRV_VERSION);
3192
3193 module_init(ahci_init);
3194 module_exit(ahci_exit);