ahci: Factor out PCI specifics from ahci_print_info()
drivers/ata/ahci.c
1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <linux/gfp.h>
46 #include <scsi/scsi_host.h>
47 #include <scsi/scsi_cmnd.h>
48 #include <linux/libata.h>
49
50 #define DRV_NAME "ahci"
51 #define DRV_VERSION "3.0"
52
53 /* Enclosure Management Control */
54 #define EM_CTRL_MSG_TYPE 0x000f0000
55
56 /* Enclosure Management LED Message Type */
57 #define EM_MSG_LED_HBA_PORT 0x0000000f
58 #define EM_MSG_LED_PMP_SLOT 0x0000ff00
59 #define EM_MSG_LED_VALUE 0xffff0000
60 #define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
61 #define EM_MSG_LED_VALUE_OFF 0xfff80000
62 #define EM_MSG_LED_VALUE_ON 0x00010000
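The EM_MSG_LED_* masks above carve the 32-bit LED message into an HBA port field (bits 3:0), a PMP slot field (bits 15:8) and a value field (bits 31:16). A minimal sketch of how such a word could be assembled from those masks follows; em_led_message() is a hypothetical helper, not part of the driver.

/* Illustrative sketch only: pack an enclosure-management LED message
 * word from the masks defined above.  Not a driver function.
 */
static u32 em_led_message(u32 port, u32 pmp_slot, u32 value)
{
	u32 msg = 0;

	msg |= port & EM_MSG_LED_HBA_PORT;		/* bits 3:0   */
	msg |= (pmp_slot << 8) & EM_MSG_LED_PMP_SLOT;	/* bits 15:8  */
	msg |= (value << 16) & EM_MSG_LED_VALUE;	/* bits 31:16 */
	return msg;
}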
63
64 static int ahci_skip_host_reset;
65 static int ahci_ignore_sss;
66
67 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
68 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
69
70 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
71 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
72
73 static int ahci_enable_alpm(struct ata_port *ap,
74 enum link_pm policy);
75 static void ahci_disable_alpm(struct ata_port *ap);
76 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
77 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
78 size_t size);
79 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
80 ssize_t size);
81
82 enum {
83 AHCI_PCI_BAR = 5,
84 AHCI_MAX_PORTS = 32,
85 AHCI_MAX_SG = 168, /* hardware max is 64K */
86 AHCI_DMA_BOUNDARY = 0xffffffff,
87 AHCI_MAX_CMDS = 32,
88 AHCI_CMD_SZ = 32,
89 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
90 AHCI_RX_FIS_SZ = 256,
91 AHCI_CMD_TBL_CDB = 0x40,
92 AHCI_CMD_TBL_HDR_SZ = 0x80,
93 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
94 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
95 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
96 AHCI_RX_FIS_SZ,
97 AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
98 AHCI_CMD_TBL_AR_SZ +
99 (AHCI_RX_FIS_SZ * 16),
100 AHCI_IRQ_ON_SG = (1 << 31),
101 AHCI_CMD_ATAPI = (1 << 5),
102 AHCI_CMD_WRITE = (1 << 6),
103 AHCI_CMD_PREFETCH = (1 << 7),
104 AHCI_CMD_RESET = (1 << 8),
105 AHCI_CMD_CLR_BUSY = (1 << 10),
106
107 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
108 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
109 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
110
111 board_ahci = 0,
112 board_ahci_vt8251 = 1,
113 board_ahci_ign_iferr = 2,
114 board_ahci_sb600 = 3,
115 board_ahci_mv = 4,
116 board_ahci_sb700 = 5, /* for SB700 and SB800 */
117 board_ahci_mcp65 = 6,
118 board_ahci_nopmp = 7,
119 board_ahci_yesncq = 8,
120 board_ahci_nosntf = 9,
121
122 /* global controller registers */
123 HOST_CAP = 0x00, /* host capabilities */
124 HOST_CTL = 0x04, /* global host control */
125 HOST_IRQ_STAT = 0x08, /* interrupt status */
126 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
127 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
128 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
129 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
130 HOST_CAP2 = 0x24, /* host capabilities, extended */
131
132 /* HOST_CTL bits */
133 HOST_RESET = (1 << 0), /* reset controller; self-clear */
134 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
135 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
136
137 /* HOST_CAP bits */
138 HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
139 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
140 HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
141 HOST_CAP_PART = (1 << 13), /* Partial state capable */
142 HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
143 HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
144 HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
145 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
146 HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
147 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
148 HOST_CAP_LED = (1 << 25), /* Supports activity LED */
149 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
150 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
151 HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
152 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
153 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
154 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
155
156 /* HOST_CAP2 bits */
157 HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
158 HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
159 HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
160
161 /* registers for each SATA port */
162 PORT_LST_ADDR = 0x00, /* command list DMA addr */
163 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
164 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
165 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
166 PORT_IRQ_STAT = 0x10, /* interrupt status */
167 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
168 PORT_CMD = 0x18, /* port command */
169 PORT_TFDATA = 0x20, /* taskfile data */
170 PORT_SIG = 0x24, /* device TF signature */
171 PORT_CMD_ISSUE = 0x38, /* command issue */
172 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
173 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
174 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
175 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
176 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
177 PORT_FBS = 0x40, /* FIS-based Switching */
178
179 /* PORT_IRQ_{STAT,MASK} bits */
180 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
181 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
182 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
183 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
184 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
185 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
186 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
187 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
188
189 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
190 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
191 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
192 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
193 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
194 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
195 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
196 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
197 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
198
199 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
200 PORT_IRQ_IF_ERR |
201 PORT_IRQ_CONNECT |
202 PORT_IRQ_PHYRDY |
203 PORT_IRQ_UNK_FIS |
204 PORT_IRQ_BAD_PMP,
205 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
206 PORT_IRQ_TF_ERR |
207 PORT_IRQ_HBUS_DATA_ERR,
208 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
209 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
210 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
211
212 /* PORT_CMD bits */
213 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
214 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
215 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
216 PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
217 PORT_CMD_PMP = (1 << 17), /* PMP attached */
218 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
219 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
220 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
221 PORT_CMD_CLO = (1 << 3), /* Command list override */
222 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
223 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
224 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
225
226 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
227 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
228 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
229 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
230
231 PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
232 PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
233 PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
234 PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
235 PORT_FBS_SDE = (1 << 2), /* FBS single device error */
236 PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
237 PORT_FBS_EN = (1 << 0), /* Enable FBS */
238
239 /* hpriv->flags bits */
240 AHCI_HFLAG_NO_NCQ = (1 << 0),
241 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
242 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
243 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
244 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
245 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
246 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
247 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
248 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
249 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
250 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
251 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
252 link offline */
253 AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
254
255 /* ap->flags bits */
256
257 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
258 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
259 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
260 ATA_FLAG_IPM,
261
262 ICH_MAP = 0x90, /* ICH MAP register */
263
264 /* em constants */
265 EM_MAX_SLOTS = 8,
266 EM_MAX_RETRY = 5,
267
268 /* em_ctl bits */
269 EM_CTL_RST = (1 << 9), /* Reset */
270 EM_CTL_TM = (1 << 8), /* Transmit Message */
271 EM_CTL_ALHD = (1 << 26), /* Activity LED */
272 };
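Since the commit above reworks ahci_print_info(), here is a minimal, hypothetical sketch of how a few of the HOST_CAP bits from this enum can be decoded into a one-line summary. cap_summary() is illustrative only and does not mirror the real ahci_print_info() output format.

/*
 * Illustrative sketch only (not a driver function): decode a few of
 * the HOST_CAP bits defined above.  CAP bits 4:0 hold the number of
 * ports minus one, matching ahci_nr_ports() further down.
 */
static void cap_summary(u32 cap)
{
	pr_info("ahci: %u ports%s%s%s%s\n",
		(cap & 0x1f) + 1,
		(cap & HOST_CAP_64)   ? ", 64bit DMA" : "",
		(cap & HOST_CAP_NCQ)  ? ", NCQ" : "",
		(cap & HOST_CAP_SNTF) ? ", SNTF" : "",
		(cap & HOST_CAP_SSS)  ? ", staggered spin-up" : "");
}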
273
274 struct ahci_cmd_hdr {
275 __le32 opts;
276 __le32 status;
277 __le32 tbl_addr;
278 __le32 tbl_addr_hi;
279 __le32 reserved[4];
280 };
281
282 struct ahci_sg {
283 __le32 addr;
284 __le32 addr_hi;
285 __le32 reserved;
286 __le32 flags_size;
287 };
288
289 struct ahci_em_priv {
290 enum sw_activity blink_policy;
291 struct timer_list timer;
292 unsigned long saved_activity;
293 unsigned long activity;
294 unsigned long led_state;
295 };
296
297 struct ahci_host_priv {
298 	void __iomem *mmio;		/* bus-independent mem map */
299 unsigned int flags; /* AHCI_HFLAG_* */
300 u32 cap; /* cap to use */
301 u32 cap2; /* cap2 to use */
302 u32 port_map; /* port map to use */
303 u32 saved_cap; /* saved initial cap */
304 u32 saved_cap2; /* saved initial cap2 */
305 u32 saved_port_map; /* saved initial port_map */
306 u32 em_loc; /* enclosure management location */
307 };
308
309 struct ahci_port_priv {
310 struct ata_link *active_link;
311 struct ahci_cmd_hdr *cmd_slot;
312 dma_addr_t cmd_slot_dma;
313 void *cmd_tbl;
314 dma_addr_t cmd_tbl_dma;
315 void *rx_fis;
316 dma_addr_t rx_fis_dma;
317 /* for NCQ spurious interrupt analysis */
318 unsigned int ncq_saw_d2h:1;
319 unsigned int ncq_saw_dmas:1;
320 unsigned int ncq_saw_sdb:1;
321 u32 intr_mask; /* interrupts to enable */
322 bool fbs_supported; /* set iff FBS is supported */
323 bool fbs_enabled; /* set iff FBS is enabled */
324 int fbs_last_dev; /* save FBS.DEV of last FIS */
325 /* enclosure management info per PM slot */
326 struct ahci_em_priv em_priv[EM_MAX_SLOTS];
327 };
328
329 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
330 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
331 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
332 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
333 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
334 static int ahci_port_start(struct ata_port *ap);
335 static void ahci_port_stop(struct ata_port *ap);
336 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
337 static void ahci_qc_prep(struct ata_queued_cmd *qc);
338 static void ahci_freeze(struct ata_port *ap);
339 static void ahci_thaw(struct ata_port *ap);
340 static void ahci_enable_fbs(struct ata_port *ap);
341 static void ahci_disable_fbs(struct ata_port *ap);
342 static void ahci_pmp_attach(struct ata_port *ap);
343 static void ahci_pmp_detach(struct ata_port *ap);
344 static int ahci_softreset(struct ata_link *link, unsigned int *class,
345 unsigned long deadline);
346 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
347 unsigned long deadline);
348 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
349 unsigned long deadline);
350 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
351 unsigned long deadline);
352 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
353 unsigned long deadline);
354 static void ahci_postreset(struct ata_link *link, unsigned int *class);
355 static void ahci_error_handler(struct ata_port *ap);
356 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
357 static int ahci_port_resume(struct ata_port *ap);
358 static void ahci_dev_config(struct ata_device *dev);
359 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
360 u32 opts);
361 #ifdef CONFIG_PM
362 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
363 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
364 static int ahci_pci_device_resume(struct pci_dev *pdev);
365 #endif
366 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
367 static ssize_t ahci_activity_store(struct ata_device *dev,
368 enum sw_activity val);
369 static void ahci_init_sw_activity(struct ata_link *link);
370
371 static ssize_t ahci_show_host_caps(struct device *dev,
372 struct device_attribute *attr, char *buf);
373 static ssize_t ahci_show_host_cap2(struct device *dev,
374 struct device_attribute *attr, char *buf);
375 static ssize_t ahci_show_host_version(struct device *dev,
376 struct device_attribute *attr, char *buf);
377 static ssize_t ahci_show_port_cmd(struct device *dev,
378 struct device_attribute *attr, char *buf);
379
380 static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
381 static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
382 static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
383 static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
384
385 static struct device_attribute *ahci_shost_attrs[] = {
386 &dev_attr_link_power_management_policy,
387 &dev_attr_em_message_type,
388 &dev_attr_em_message,
389 &dev_attr_ahci_host_caps,
390 &dev_attr_ahci_host_cap2,
391 &dev_attr_ahci_host_version,
392 &dev_attr_ahci_port_cmd,
393 NULL
394 };
395
396 static struct device_attribute *ahci_sdev_attrs[] = {
397 &dev_attr_sw_activity,
398 &dev_attr_unload_heads,
399 NULL
400 };
401
402 static struct scsi_host_template ahci_sht = {
403 ATA_NCQ_SHT(DRV_NAME),
404 .can_queue = AHCI_MAX_CMDS - 1,
405 .sg_tablesize = AHCI_MAX_SG,
406 .dma_boundary = AHCI_DMA_BOUNDARY,
407 .shost_attrs = ahci_shost_attrs,
408 .sdev_attrs = ahci_sdev_attrs,
409 };
410
411 static struct ata_port_operations ahci_ops = {
412 .inherits = &sata_pmp_port_ops,
413
414 .qc_defer = ahci_pmp_qc_defer,
415 .qc_prep = ahci_qc_prep,
416 .qc_issue = ahci_qc_issue,
417 .qc_fill_rtf = ahci_qc_fill_rtf,
418
419 .freeze = ahci_freeze,
420 .thaw = ahci_thaw,
421 .softreset = ahci_softreset,
422 .hardreset = ahci_hardreset,
423 .postreset = ahci_postreset,
424 .pmp_softreset = ahci_softreset,
425 .error_handler = ahci_error_handler,
426 .post_internal_cmd = ahci_post_internal_cmd,
427 .dev_config = ahci_dev_config,
428
429 .scr_read = ahci_scr_read,
430 .scr_write = ahci_scr_write,
431 .pmp_attach = ahci_pmp_attach,
432 .pmp_detach = ahci_pmp_detach,
433
434 .enable_pm = ahci_enable_alpm,
435 .disable_pm = ahci_disable_alpm,
436 .em_show = ahci_led_show,
437 .em_store = ahci_led_store,
438 .sw_activity_show = ahci_activity_show,
439 .sw_activity_store = ahci_activity_store,
440 #ifdef CONFIG_PM
441 .port_suspend = ahci_port_suspend,
442 .port_resume = ahci_port_resume,
443 #endif
444 .port_start = ahci_port_start,
445 .port_stop = ahci_port_stop,
446 };
447
448 static struct ata_port_operations ahci_vt8251_ops = {
449 .inherits = &ahci_ops,
450 .hardreset = ahci_vt8251_hardreset,
451 };
452
453 static struct ata_port_operations ahci_p5wdh_ops = {
454 .inherits = &ahci_ops,
455 .hardreset = ahci_p5wdh_hardreset,
456 };
457
458 static struct ata_port_operations ahci_sb600_ops = {
459 .inherits = &ahci_ops,
460 .softreset = ahci_sb600_softreset,
461 .pmp_softreset = ahci_sb600_softreset,
462 };
463
464 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
465
466 static const struct ata_port_info ahci_port_info[] = {
467 [board_ahci] =
468 {
469 .flags = AHCI_FLAG_COMMON,
470 .pio_mask = ATA_PIO4,
471 .udma_mask = ATA_UDMA6,
472 .port_ops = &ahci_ops,
473 },
474 [board_ahci_vt8251] =
475 {
476 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
477 .flags = AHCI_FLAG_COMMON,
478 .pio_mask = ATA_PIO4,
479 .udma_mask = ATA_UDMA6,
480 .port_ops = &ahci_vt8251_ops,
481 },
482 [board_ahci_ign_iferr] =
483 {
484 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
485 .flags = AHCI_FLAG_COMMON,
486 .pio_mask = ATA_PIO4,
487 .udma_mask = ATA_UDMA6,
488 .port_ops = &ahci_ops,
489 },
490 [board_ahci_sb600] =
491 {
492 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
493 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
494 AHCI_HFLAG_32BIT_ONLY),
495 .flags = AHCI_FLAG_COMMON,
496 .pio_mask = ATA_PIO4,
497 .udma_mask = ATA_UDMA6,
498 .port_ops = &ahci_sb600_ops,
499 },
500 [board_ahci_mv] =
501 {
502 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
503 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
504 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
505 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
506 .pio_mask = ATA_PIO4,
507 .udma_mask = ATA_UDMA6,
508 .port_ops = &ahci_ops,
509 },
510 [board_ahci_sb700] = /* for SB700 and SB800 */
511 {
512 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
513 .flags = AHCI_FLAG_COMMON,
514 .pio_mask = ATA_PIO4,
515 .udma_mask = ATA_UDMA6,
516 .port_ops = &ahci_sb600_ops,
517 },
518 [board_ahci_mcp65] =
519 {
520 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
521 .flags = AHCI_FLAG_COMMON,
522 .pio_mask = ATA_PIO4,
523 .udma_mask = ATA_UDMA6,
524 .port_ops = &ahci_ops,
525 },
526 [board_ahci_nopmp] =
527 {
528 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
529 .flags = AHCI_FLAG_COMMON,
530 .pio_mask = ATA_PIO4,
531 .udma_mask = ATA_UDMA6,
532 .port_ops = &ahci_ops,
533 },
534 [board_ahci_yesncq] =
535 {
536 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
537 .flags = AHCI_FLAG_COMMON,
538 .pio_mask = ATA_PIO4,
539 .udma_mask = ATA_UDMA6,
540 .port_ops = &ahci_ops,
541 },
542 [board_ahci_nosntf] =
543 {
544 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
545 .flags = AHCI_FLAG_COMMON,
546 .pio_mask = ATA_PIO4,
547 .udma_mask = ATA_UDMA6,
548 .port_ops = &ahci_ops,
549 },
550 };
551
552 static const struct pci_device_id ahci_pci_tbl[] = {
553 /* Intel */
554 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
555 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
556 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
557 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
558 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
559 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
560 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
561 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
562 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
563 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
564 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
565 { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
566 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
567 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
568 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
569 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
570 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
571 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
572 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
573 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
574 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
575 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
576 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
577 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
578 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
579 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
580 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
581 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
582 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
583 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
584 { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
585 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
586 { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
587 { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
588 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
589 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
590 { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
591 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
592 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
593 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
594 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
595 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
596 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
597 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
598 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
599 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
600
601 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
602 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
603 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
604
605 /* ATI */
606 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
607 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
608 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
609 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
610 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
611 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
612 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
613
614 /* AMD */
615 { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
616 	/* AMD uses the RAID class only for AHCI controllers */
617 { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
618 PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
619
620 /* VIA */
621 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
622 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
623
624 /* NVIDIA */
625 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
626 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
627 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
628 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
629 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
630 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
631 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
632 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
633 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
634 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
635 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
636 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
637 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
638 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
639 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
640 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
641 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
642 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
643 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
644 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
645 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
646 { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq }, /* Linux ID */
647 { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq }, /* Linux ID */
648 { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq }, /* Linux ID */
649 { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq }, /* Linux ID */
650 { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq }, /* Linux ID */
651 { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq }, /* Linux ID */
652 { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq }, /* Linux ID */
653 { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq }, /* Linux ID */
654 { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq }, /* Linux ID */
655 { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq }, /* Linux ID */
656 { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq }, /* Linux ID */
657 { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq }, /* Linux ID */
658 { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq }, /* Linux ID */
659 { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq }, /* Linux ID */
660 { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq }, /* Linux ID */
661 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
662 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
663 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
664 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
665 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
666 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
667 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
668 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
669 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
670 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
671 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
672 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
673 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
674 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
675 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
676 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
677 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
678 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
679 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
680 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
681 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
682 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
683 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
684 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
685 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
686 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
687 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
688 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
689 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
690 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
691 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
692 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
693 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
694 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
695 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
696 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
697 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
698 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
699 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
700 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
701 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
702 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
703 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
704 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
705 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
706 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
707 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
708 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
709
710 /* SiS */
711 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
712 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
713 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
714
715 /* Marvell */
716 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
717 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
718
719 /* Promise */
720 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
721
722 /* Generic, PCI class code for AHCI */
723 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
724 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
725
726 { } /* terminate list */
727 };
728
729
730 static struct pci_driver ahci_pci_driver = {
731 .name = DRV_NAME,
732 .id_table = ahci_pci_tbl,
733 .probe = ahci_init_one,
734 .remove = ata_pci_remove_one,
735 #ifdef CONFIG_PM
736 .suspend = ahci_pci_device_suspend,
737 .resume = ahci_pci_device_resume,
738 #endif
739 };
740
741 static int ahci_em_messages = 1;
742 module_param(ahci_em_messages, int, 0444);
743 /* add other LED protocol types when they become supported */
744 MODULE_PARM_DESC(ahci_em_messages,
745 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
746
747 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
748 static int marvell_enable;
749 #else
750 static int marvell_enable = 1;
751 #endif
752 module_param(marvell_enable, int, 0644);
753 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
754
755
756 static inline int ahci_nr_ports(u32 cap)
757 {
758 return (cap & 0x1f) + 1;
759 }
760
761 static inline void __iomem *__ahci_port_base(struct ata_host *host,
762 unsigned int port_no)
763 {
764 struct ahci_host_priv *hpriv = host->private_data;
765 void __iomem *mmio = hpriv->mmio;
766
767 return mmio + 0x100 + (port_no * 0x80);
768 }
769
770 static inline void __iomem *ahci_port_base(struct ata_port *ap)
771 {
772 return __ahci_port_base(ap->host, ap->port_no);
773 }
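Per the register layout encoded in __ahci_port_base(), port registers start at ABAR offset 0x100 and each port occupies 0x80 bytes, so for example port 2's PORT_CMD register sits at mmio + 0x100 + 2 * 0x80 + 0x18 = mmio + 0x218. A hedged sketch using these helpers to dump SStatus for every implemented port follows; dump_sstatus() is a hypothetical name, not a driver function.

/*
 * Illustrative sketch only: walk the implemented ports and read each
 * port's SStatus register using the helpers above.
 */
static void dump_sstatus(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	unsigned int i;

	for (i = 0; i < AHCI_MAX_PORTS; i++) {
		void __iomem *port_mmio;

		if (!(hpriv->port_map & (1U << i)))
			continue;
		port_mmio = __ahci_port_base(host, i);
		dev_info(host->dev, "port %u SStatus 0x%x\n", i,
			 readl(port_mmio + PORT_SCR_STAT));
	}
}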
774
775 static void ahci_enable_ahci(void __iomem *mmio)
776 {
777 int i;
778 u32 tmp;
779
780 /* turn on AHCI_EN */
781 tmp = readl(mmio + HOST_CTL);
782 if (tmp & HOST_AHCI_EN)
783 return;
784
785 /* Some controllers need AHCI_EN to be written multiple times.
786 * Try a few times before giving up.
787 */
788 for (i = 0; i < 5; i++) {
789 tmp |= HOST_AHCI_EN;
790 writel(tmp, mmio + HOST_CTL);
791 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
792 if (tmp & HOST_AHCI_EN)
793 return;
794 msleep(10);
795 }
796
797 WARN_ON(1);
798 }
799
800 static ssize_t ahci_show_host_caps(struct device *dev,
801 struct device_attribute *attr, char *buf)
802 {
803 struct Scsi_Host *shost = class_to_shost(dev);
804 struct ata_port *ap = ata_shost_to_port(shost);
805 struct ahci_host_priv *hpriv = ap->host->private_data;
806
807 return sprintf(buf, "%x\n", hpriv->cap);
808 }
809
810 static ssize_t ahci_show_host_cap2(struct device *dev,
811 struct device_attribute *attr, char *buf)
812 {
813 struct Scsi_Host *shost = class_to_shost(dev);
814 struct ata_port *ap = ata_shost_to_port(shost);
815 struct ahci_host_priv *hpriv = ap->host->private_data;
816
817 return sprintf(buf, "%x\n", hpriv->cap2);
818 }
819
820 static ssize_t ahci_show_host_version(struct device *dev,
821 struct device_attribute *attr, char *buf)
822 {
823 struct Scsi_Host *shost = class_to_shost(dev);
824 struct ata_port *ap = ata_shost_to_port(shost);
825 struct ahci_host_priv *hpriv = ap->host->private_data;
826 void __iomem *mmio = hpriv->mmio;
827
828 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
829 }
830
831 static ssize_t ahci_show_port_cmd(struct device *dev,
832 struct device_attribute *attr, char *buf)
833 {
834 struct Scsi_Host *shost = class_to_shost(dev);
835 struct ata_port *ap = ata_shost_to_port(shost);
836 void __iomem *port_mmio = ahci_port_base(ap);
837
838 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
839 }
840
841 /**
842 * ahci_save_initial_config - Save and fixup initial config values
843 * @dev: target AHCI device
844 * @hpriv: host private area to store config values
845 * @force_port_map: force port map to a specified value
846 * @mask_port_map: mask out particular bits from port map
847 *
848 * Some registers containing configuration info might be setup by
849 * BIOS and might be cleared on reset. This function saves the
850 * initial values of those registers into @hpriv such that they
851 * can be restored after controller reset.
852 *
853 * If inconsistent, config values are fixed up by this function.
854 *
855 * LOCKING:
856 * None.
857 */
858 static void ahci_save_initial_config(struct device *dev,
859 struct ahci_host_priv *hpriv,
860 unsigned int force_port_map,
861 unsigned int mask_port_map)
862 {
863 void __iomem *mmio = hpriv->mmio;
864 u32 cap, cap2, vers, port_map;
865 int i;
866
867 /* make sure AHCI mode is enabled before accessing CAP */
868 ahci_enable_ahci(mmio);
869
870 /* Values prefixed with saved_ are written back to host after
871 * reset. Values without are used for driver operation.
872 */
873 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
874 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
875
876 /* CAP2 register is only defined for AHCI 1.2 and later */
877 vers = readl(mmio + HOST_VERSION);
878 if ((vers >> 16) > 1 ||
879 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
880 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
881 else
882 hpriv->saved_cap2 = cap2 = 0;
883
884 /* some chips have errata preventing 64bit use */
885 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
886 dev_printk(KERN_INFO, dev,
887 "controller can't do 64bit DMA, forcing 32bit\n");
888 cap &= ~HOST_CAP_64;
889 }
890
891 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
892 dev_printk(KERN_INFO, dev,
893 "controller can't do NCQ, turning off CAP_NCQ\n");
894 cap &= ~HOST_CAP_NCQ;
895 }
896
897 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
898 dev_printk(KERN_INFO, dev,
899 "controller can do NCQ, turning on CAP_NCQ\n");
900 cap |= HOST_CAP_NCQ;
901 }
902
903 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
904 dev_printk(KERN_INFO, dev,
905 "controller can't do PMP, turning off CAP_PMP\n");
906 cap &= ~HOST_CAP_PMP;
907 }
908
909 if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
910 dev_printk(KERN_INFO, dev,
911 "controller can't do SNTF, turning off CAP_SNTF\n");
912 cap &= ~HOST_CAP_SNTF;
913 }
914
915 if (force_port_map && port_map != force_port_map) {
916 dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
917 port_map, force_port_map);
918 port_map = force_port_map;
919 }
920
921 if (mask_port_map) {
922 dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
923 port_map,
924 port_map & mask_port_map);
925 port_map &= mask_port_map;
926 }
927
928 /* cross check port_map and cap.n_ports */
929 if (port_map) {
930 int map_ports = 0;
931
932 for (i = 0; i < AHCI_MAX_PORTS; i++)
933 if (port_map & (1 << i))
934 map_ports++;
935
936 /* If PI has more ports than n_ports, whine, clear
937 * port_map and let it be generated from n_ports.
938 */
939 if (map_ports > ahci_nr_ports(cap)) {
940 dev_printk(KERN_WARNING, dev,
941 "implemented port map (0x%x) contains more "
942 "ports than nr_ports (%u), using nr_ports\n",
943 port_map, ahci_nr_ports(cap));
944 port_map = 0;
945 }
946 }
947
948 /* fabricate port_map from cap.nr_ports */
949 if (!port_map) {
950 port_map = (1 << ahci_nr_ports(cap)) - 1;
951 dev_printk(KERN_WARNING, dev,
952 "forcing PORTS_IMPL to 0x%x\n", port_map);
953
954 /* write the fixed up value to the PI register */
955 hpriv->saved_port_map = port_map;
956 }
957
958 /* record values to use during operation */
959 hpriv->cap = cap;
960 hpriv->cap2 = cap2;
961 hpriv->port_map = port_map;
962 }
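The PI cross-check near the end of ahci_save_initial_config() boils down to a simple rule: a port map that advertises more ports than CAP.NP allows is discarded and regenerated from the port count. Below is a hedged restatement as a pure helper, assuming hweight32() for the population count; sanitize_port_map() is a hypothetical name, not a driver function.

/*
 * Illustrative sketch only: the PI/CAP.NP consistency rule applied by
 * ahci_save_initial_config() above, expressed as a pure helper.
 */
static u32 sanitize_port_map(u32 port_map, u32 cap)
{
	/* too many bits set in PI?  fall back to CAP.NP */
	if (hweight32(port_map) > ahci_nr_ports(cap))
		port_map = 0;

	/* fabricate a dense map from the port count, as the driver does */
	if (!port_map)
		port_map = (1 << ahci_nr_ports(cap)) - 1;

	return port_map;
}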
963
964 static void ahci_pci_save_initial_config(struct pci_dev *pdev,
965 struct ahci_host_priv *hpriv)
966 {
967 unsigned int force_port_map = 0;
968 unsigned int mask_port_map = 0;
969
970 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
971 dev_info(&pdev->dev, "JMB361 has only one port\n");
972 force_port_map = 1;
973 }
974
975 /*
976 * Temporary Marvell 6145 hack: PATA port presence
977 * is asserted through the standard AHCI port
978 * presence register, as bit 4 (counting from 0)
979 */
980 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
981 if (pdev->device == 0x6121)
982 mask_port_map = 0x3;
983 else
984 mask_port_map = 0xf;
985 dev_info(&pdev->dev,
986 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
987 }
988
989 ahci_save_initial_config(&pdev->dev, hpriv, force_port_map,
990 mask_port_map);
991 }
992
993 /**
994 * ahci_restore_initial_config - Restore initial config
995 * @host: target ATA host
996 *
997 * Restore initial config stored by ahci_save_initial_config().
998 *
999 * LOCKING:
1000 * None.
1001 */
1002 static void ahci_restore_initial_config(struct ata_host *host)
1003 {
1004 struct ahci_host_priv *hpriv = host->private_data;
1005 void __iomem *mmio = hpriv->mmio;
1006
1007 writel(hpriv->saved_cap, mmio + HOST_CAP);
1008 if (hpriv->saved_cap2)
1009 writel(hpriv->saved_cap2, mmio + HOST_CAP2);
1010 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
1011 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
1012 }
1013
1014 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
1015 {
1016 static const int offset[] = {
1017 [SCR_STATUS] = PORT_SCR_STAT,
1018 [SCR_CONTROL] = PORT_SCR_CTL,
1019 [SCR_ERROR] = PORT_SCR_ERR,
1020 [SCR_ACTIVE] = PORT_SCR_ACT,
1021 [SCR_NOTIFICATION] = PORT_SCR_NTF,
1022 };
1023 struct ahci_host_priv *hpriv = ap->host->private_data;
1024
1025 if (sc_reg < ARRAY_SIZE(offset) &&
1026 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
1027 return offset[sc_reg];
1028 return 0;
1029 }
1030
1031 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1032 {
1033 void __iomem *port_mmio = ahci_port_base(link->ap);
1034 int offset = ahci_scr_offset(link->ap, sc_reg);
1035
1036 if (offset) {
1037 *val = readl(port_mmio + offset);
1038 return 0;
1039 }
1040 return -EINVAL;
1041 }
1042
1043 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1044 {
1045 void __iomem *port_mmio = ahci_port_base(link->ap);
1046 int offset = ahci_scr_offset(link->ap, sc_reg);
1047
1048 if (offset) {
1049 writel(val, port_mmio + offset);
1050 return 0;
1051 }
1052 return -EINVAL;
1053 }
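A typical consumer of ahci_scr_read() is link presence detection: SStatus bits 3:0 (DET) read 0x3 when a device is attached and PHY communication is established. A minimal sketch follows; link_is_up() is a hypothetical helper, not a driver function.

/*
 * Illustrative sketch only: read SStatus through ahci_scr_read() and
 * test the DET field (bits 3:0; 0x3 = device present, PHY comm up).
 */
static bool link_is_up(struct ata_link *link)
{
	u32 sstatus;

	if (ahci_scr_read(link, SCR_STATUS, &sstatus))
		return false;
	return (sstatus & 0xf) == 0x3;
}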
1054
1055 static void ahci_start_engine(struct ata_port *ap)
1056 {
1057 void __iomem *port_mmio = ahci_port_base(ap);
1058 u32 tmp;
1059
1060 /* start DMA */
1061 tmp = readl(port_mmio + PORT_CMD);
1062 tmp |= PORT_CMD_START;
1063 writel(tmp, port_mmio + PORT_CMD);
1064 readl(port_mmio + PORT_CMD); /* flush */
1065 }
1066
1067 static int ahci_stop_engine(struct ata_port *ap)
1068 {
1069 void __iomem *port_mmio = ahci_port_base(ap);
1070 u32 tmp;
1071
1072 tmp = readl(port_mmio + PORT_CMD);
1073
1074 /* check if the HBA is idle */
1075 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
1076 return 0;
1077
1078 /* setting HBA to idle */
1079 tmp &= ~PORT_CMD_START;
1080 writel(tmp, port_mmio + PORT_CMD);
1081
1082 /* wait for engine to stop. This could be as long as 500 msec */
1083 tmp = ata_wait_register(port_mmio + PORT_CMD,
1084 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
1085 if (tmp & PORT_CMD_LIST_ON)
1086 return -EIO;
1087
1088 return 0;
1089 }
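ahci_start_engine() and ahci_stop_engine() are the two halves of the engine restart sequence: the AHCI spec only allows PxCMD.ST to be set again once PxCMD.CR has cleared, which is what the wait in ahci_stop_engine() guarantees. A hedged sketch of the combined restart; restart_engine() is a hypothetical name, not a driver function.

/*
 * Illustrative sketch only: stop the port DMA engine, then start it
 * again once the stop has actually completed.
 */
static int restart_engine(struct ata_port *ap)
{
	int rc;

	rc = ahci_stop_engine(ap);
	if (rc)
		return rc;
	ahci_start_engine(ap);
	return 0;
}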
1090
1091 static void ahci_start_fis_rx(struct ata_port *ap)
1092 {
1093 void __iomem *port_mmio = ahci_port_base(ap);
1094 struct ahci_host_priv *hpriv = ap->host->private_data;
1095 struct ahci_port_priv *pp = ap->private_data;
1096 u32 tmp;
1097
1098 /* set FIS registers */
1099 if (hpriv->cap & HOST_CAP_64)
1100 writel((pp->cmd_slot_dma >> 16) >> 16,
1101 port_mmio + PORT_LST_ADDR_HI);
1102 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
1103
1104 if (hpriv->cap & HOST_CAP_64)
1105 writel((pp->rx_fis_dma >> 16) >> 16,
1106 port_mmio + PORT_FIS_ADDR_HI);
1107 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
1108
1109 /* enable FIS reception */
1110 tmp = readl(port_mmio + PORT_CMD);
1111 tmp |= PORT_CMD_FIS_RX;
1112 writel(tmp, port_mmio + PORT_CMD);
1113
1114 /* flush */
1115 readl(port_mmio + PORT_CMD);
1116 }
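The "(dma >> 16) >> 16" idiom above extracts the high dword without shifting a possibly 32-bit dma_addr_t by 32, which would be undefined. A small hedged helper capturing the same split; write_dma_addr() is a hypothetical name, not a driver function.

/*
 * Illustrative sketch only: program a lo/hi register pair with a DMA
 * address, writing the upper dword only on 64-bit capable HBAs.
 */
static void write_dma_addr(void __iomem *lo, void __iomem *hi,
			   dma_addr_t addr, bool cap_64)
{
	writel(addr & 0xffffffff, lo);
	if (cap_64)
		writel((addr >> 16) >> 16, hi);
}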
1117
1118 static int ahci_stop_fis_rx(struct ata_port *ap)
1119 {
1120 void __iomem *port_mmio = ahci_port_base(ap);
1121 u32 tmp;
1122
1123 /* disable FIS reception */
1124 tmp = readl(port_mmio + PORT_CMD);
1125 tmp &= ~PORT_CMD_FIS_RX;
1126 writel(tmp, port_mmio + PORT_CMD);
1127
1128 /* wait for completion, spec says 500ms, give it 1000 */
1129 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1130 PORT_CMD_FIS_ON, 10, 1000);
1131 if (tmp & PORT_CMD_FIS_ON)
1132 return -EBUSY;
1133
1134 return 0;
1135 }
1136
1137 static void ahci_power_up(struct ata_port *ap)
1138 {
1139 struct ahci_host_priv *hpriv = ap->host->private_data;
1140 void __iomem *port_mmio = ahci_port_base(ap);
1141 u32 cmd;
1142
1143 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1144
1145 /* spin up device */
1146 if (hpriv->cap & HOST_CAP_SSS) {
1147 cmd |= PORT_CMD_SPIN_UP;
1148 writel(cmd, port_mmio + PORT_CMD);
1149 }
1150
1151 /* wake up link */
1152 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
1153 }
1154
1155 static void ahci_disable_alpm(struct ata_port *ap)
1156 {
1157 struct ahci_host_priv *hpriv = ap->host->private_data;
1158 void __iomem *port_mmio = ahci_port_base(ap);
1159 u32 cmd;
1160 struct ahci_port_priv *pp = ap->private_data;
1161
1162 /* IPM bits should be disabled by libata-core */
1163 /* get the existing command bits */
1164 cmd = readl(port_mmio + PORT_CMD);
1165
1166 /* disable ALPM and ASP */
1167 cmd &= ~PORT_CMD_ASP;
1168 cmd &= ~PORT_CMD_ALPE;
1169
1170 /* force the interface back to active */
1171 cmd |= PORT_CMD_ICC_ACTIVE;
1172
1173 /* write out new cmd value */
1174 writel(cmd, port_mmio + PORT_CMD);
1175 cmd = readl(port_mmio + PORT_CMD);
1176
1177 /* wait 10ms to be sure we've come out of any low power state */
1178 msleep(10);
1179
1180 /* clear out any PhyRdy stuff from interrupt status */
1181 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1182
1183 /* go ahead and clean out PhyRdy Change from Serror too */
1184 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1185
1186 /*
1187 * Clear flag to indicate that we should ignore all PhyRdy
1188 * state changes
1189 */
1190 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1191
1192 /*
1193 * Enable interrupts on Phy Ready.
1194 */
1195 pp->intr_mask |= PORT_IRQ_PHYRDY;
1196 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1197
1198 /*
1199 * don't change the link pm policy - we can be called
1200 	 * just to turn off link pm temporarily
1201 */
1202 }
1203
1204 static int ahci_enable_alpm(struct ata_port *ap,
1205 enum link_pm policy)
1206 {
1207 struct ahci_host_priv *hpriv = ap->host->private_data;
1208 void __iomem *port_mmio = ahci_port_base(ap);
1209 u32 cmd;
1210 struct ahci_port_priv *pp = ap->private_data;
1211 u32 asp;
1212
1213 /* Make sure the host is capable of link power management */
1214 if (!(hpriv->cap & HOST_CAP_ALPM))
1215 return -EINVAL;
1216
1217 switch (policy) {
1218 case MAX_PERFORMANCE:
1219 case NOT_AVAILABLE:
1220 /*
1221 * if we came here with NOT_AVAILABLE,
1222 * it just means this is the first time we
1223 * have tried to enable - default to max performance,
1224 * and let the user go to lower power modes on request.
1225 */
1226 ahci_disable_alpm(ap);
1227 return 0;
1228 case MIN_POWER:
1229 /* configure HBA to enter SLUMBER */
1230 asp = PORT_CMD_ASP;
1231 break;
1232 case MEDIUM_POWER:
1233 /* configure HBA to enter PARTIAL */
1234 asp = 0;
1235 break;
1236 default:
1237 return -EINVAL;
1238 }
1239
1240 /*
1241 * Disable interrupts on Phy Ready. This keeps us from
1242 * getting woken up due to spurious phy ready interrupts
1243 * TBD - Hot plug should be done via polling now, is
1244 * that even supported?
1245 */
1246 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1247 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1248
1249 /*
1250 * Set a flag to indicate that we should ignore all PhyRdy
1251 * state changes since these can happen now whenever we
1252 * change link state
1253 */
1254 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1255
1256 /* get the existing command bits */
1257 cmd = readl(port_mmio + PORT_CMD);
1258
1259 /*
1260 * Set ASP based on Policy
1261 */
1262 cmd |= asp;
1263
1264 /*
1265 * Setting this bit will instruct the HBA to aggressively
1266 * enter a lower power link state when it's appropriate and
1267 * based on the value set above for ASP
1268 */
1269 cmd |= PORT_CMD_ALPE;
1270
1271 /* write out new cmd value */
1272 writel(cmd, port_mmio + PORT_CMD);
1273 cmd = readl(port_mmio + PORT_CMD);
1274
1275 /* IPM bits should be set by libata-core */
1276 return 0;
1277 }
1278
1279 #ifdef CONFIG_PM
1280 static void ahci_power_down(struct ata_port *ap)
1281 {
1282 struct ahci_host_priv *hpriv = ap->host->private_data;
1283 void __iomem *port_mmio = ahci_port_base(ap);
1284 u32 cmd, scontrol;
1285
1286 if (!(hpriv->cap & HOST_CAP_SSS))
1287 return;
1288
1289 /* put device into listen mode, first set PxSCTL.DET to 0 */
1290 scontrol = readl(port_mmio + PORT_SCR_CTL);
1291 scontrol &= ~0xf;
1292 writel(scontrol, port_mmio + PORT_SCR_CTL);
1293
1294 /* then set PxCMD.SUD to 0 */
1295 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1296 cmd &= ~PORT_CMD_SPIN_UP;
1297 writel(cmd, port_mmio + PORT_CMD);
1298 }
1299 #endif
1300
1301 static void ahci_start_port(struct ata_port *ap)
1302 {
1303 struct ahci_port_priv *pp = ap->private_data;
1304 struct ata_link *link;
1305 struct ahci_em_priv *emp;
1306 ssize_t rc;
1307 int i;
1308
1309 /* enable FIS reception */
1310 ahci_start_fis_rx(ap);
1311
1312 /* enable DMA */
1313 ahci_start_engine(ap);
1314
1315 /* turn on LEDs */
1316 if (ap->flags & ATA_FLAG_EM) {
1317 ata_for_each_link(link, ap, EDGE) {
1318 emp = &pp->em_priv[link->pmp];
1319
1320 			/* EM Transmit bit may be busy during init */
1321 for (i = 0; i < EM_MAX_RETRY; i++) {
1322 rc = ahci_transmit_led_message(ap,
1323 emp->led_state,
1324 4);
1325 if (rc == -EBUSY)
1326 msleep(1);
1327 else
1328 break;
1329 }
1330 }
1331 }
1332
1333 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1334 ata_for_each_link(link, ap, EDGE)
1335 ahci_init_sw_activity(link);
1336
1337 }
1338
1339 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1340 {
1341 int rc;
1342
1343 /* disable DMA */
1344 rc = ahci_stop_engine(ap);
1345 if (rc) {
1346 *emsg = "failed to stop engine";
1347 return rc;
1348 }
1349
1350 /* disable FIS reception */
1351 rc = ahci_stop_fis_rx(ap);
1352 if (rc) {
1353 *emsg = "failed stop FIS RX";
1354 return rc;
1355 }
1356
1357 return 0;
1358 }
1359
1360 static int ahci_reset_controller(struct ata_host *host)
1361 {
1362 struct ahci_host_priv *hpriv = host->private_data;
1363 void __iomem *mmio = hpriv->mmio;
1364 u32 tmp;
1365
1366 	/* we must be in AHCI mode before using anything
1367 * AHCI-specific, such as HOST_RESET.
1368 */
1369 ahci_enable_ahci(mmio);
1370
1371 /* global controller reset */
1372 if (!ahci_skip_host_reset) {
1373 tmp = readl(mmio + HOST_CTL);
1374 if ((tmp & HOST_RESET) == 0) {
1375 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1376 readl(mmio + HOST_CTL); /* flush */
1377 }
1378
1379 /*
1380 * to perform host reset, OS should set HOST_RESET
1381 * and poll until this bit is read to be "0".
1382 * reset must complete within 1 second, or
1383 * the hardware should be considered fried.
1384 */
1385 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1386 HOST_RESET, 10, 1000);
1387
1388 if (tmp & HOST_RESET) {
1389 dev_printk(KERN_ERR, host->dev,
1390 "controller reset failed (0x%x)\n", tmp);
1391 return -EIO;
1392 }
1393
1394 /* turn on AHCI mode */
1395 ahci_enable_ahci(mmio);
1396
1397 /* Some registers might be cleared on reset. Restore
1398 * initial values.
1399 */
1400 ahci_restore_initial_config(host);
1401 } else
1402 dev_printk(KERN_INFO, host->dev,
1403 "skipping global host reset\n");
1404
1405 return 0;
1406 }
1407
1408 static int ahci_pci_reset_controller(struct ata_host *host)
1409 {
1410 struct pci_dev *pdev = to_pci_dev(host->dev);
1411
1412 ahci_reset_controller(host);
1413
1414 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1415 struct ahci_host_priv *hpriv = host->private_data;
1416 u16 tmp16;
1417
1418 /* configure PCS */
1419 pci_read_config_word(pdev, 0x92, &tmp16);
1420 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1421 tmp16 |= hpriv->port_map;
1422 pci_write_config_word(pdev, 0x92, tmp16);
1423 }
1424 }
1425
1426 return 0;
1427 }
1428
1429 static void ahci_sw_activity(struct ata_link *link)
1430 {
1431 struct ata_port *ap = link->ap;
1432 struct ahci_port_priv *pp = ap->private_data;
1433 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1434
1435 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1436 return;
1437
1438 emp->activity++;
1439 if (!timer_pending(&emp->timer))
1440 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1441 }
1442
1443 static void ahci_sw_activity_blink(unsigned long arg)
1444 {
1445 struct ata_link *link = (struct ata_link *)arg;
1446 struct ata_port *ap = link->ap;
1447 struct ahci_port_priv *pp = ap->private_data;
1448 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1449 unsigned long led_message = emp->led_state;
1450 u32 activity_led_state;
1451 unsigned long flags;
1452
1453 led_message &= EM_MSG_LED_VALUE;
1454 led_message |= ap->port_no | (link->pmp << 8);
1455
1456 /* check to see if we've had activity. If so,
1457 * toggle state of LED and reset timer. If not,
1458 * turn LED to desired idle state.
1459 */
1460 spin_lock_irqsave(ap->lock, flags);
1461 if (emp->saved_activity != emp->activity) {
1462 emp->saved_activity = emp->activity;
1463 /* get the current LED state */
1464 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1465
1466 if (activity_led_state)
1467 activity_led_state = 0;
1468 else
1469 activity_led_state = 1;
1470
1471 /* clear old state */
1472 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1473
1474 /* toggle state */
1475 led_message |= (activity_led_state << 16);
1476 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1477 } else {
1478 /* switch to idle */
1479 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1480 if (emp->blink_policy == BLINK_OFF)
1481 led_message |= (1 << 16);
1482 }
1483 spin_unlock_irqrestore(ap->lock, flags);
1484 ahci_transmit_led_message(ap, led_message, 4);
1485 }
1486
1487 static void ahci_init_sw_activity(struct ata_link *link)
1488 {
1489 struct ata_port *ap = link->ap;
1490 struct ahci_port_priv *pp = ap->private_data;
1491 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1492
1493 /* init activity stats, setup timer */
1494 emp->saved_activity = emp->activity = 0;
1495 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1496
1497 /* check our blink policy and set flag for link if it's enabled */
1498 if (emp->blink_policy)
1499 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1500 }
1501
1502 static int ahci_reset_em(struct ata_host *host)
1503 {
1504 struct ahci_host_priv *hpriv = host->private_data;
1505 void __iomem *mmio = hpriv->mmio;
1506 u32 em_ctl;
1507
1508 em_ctl = readl(mmio + HOST_EM_CTL);
1509 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1510 return -EINVAL;
1511
1512 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1513 return 0;
1514 }
1515
1516 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1517 ssize_t size)
1518 {
1519 struct ahci_host_priv *hpriv = ap->host->private_data;
1520 struct ahci_port_priv *pp = ap->private_data;
1521 void __iomem *mmio = hpriv->mmio;
1522 u32 em_ctl;
1523 u32 message[] = {0, 0};
1524 unsigned long flags;
1525 int pmp;
1526 struct ahci_em_priv *emp;
1527
1528 /* get the slot number from the message */
1529 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1530 if (pmp < EM_MAX_SLOTS)
1531 emp = &pp->em_priv[pmp];
1532 else
1533 return -EINVAL;
1534
1535 spin_lock_irqsave(ap->lock, flags);
1536
1537 /*
1538 * if we are still busy transmitting a previous message,
1539 	 * do not allow a new one
1540 */
1541 em_ctl = readl(mmio + HOST_EM_CTL);
1542 if (em_ctl & EM_CTL_TM) {
1543 spin_unlock_irqrestore(ap->lock, flags);
1544 return -EBUSY;
1545 }
1546
1547 /*
1548 * create message header - this is all zero except for
1549 * the message size, which is 4 bytes.
1550 */
1551 message[0] |= (4 << 8);
1552
1553 /* ignore 0:4 of byte zero, fill in port info yourself */
1554 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1555
1556 /* write message to EM_LOC */
1557 writel(message[0], mmio + hpriv->em_loc);
1558 writel(message[1], mmio + hpriv->em_loc+4);
1559
1560 /* save off new led state for port/slot */
1561 emp->led_state = state;
1562
1563 /*
1564 * tell hardware to transmit the message
1565 */
1566 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1567
1568 spin_unlock_irqrestore(ap->lock, flags);
1569 return size;
1570 }
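Callers of ahci_transmit_led_message() build the 4-byte message from the port number, the PMP slot and the LED value field, much as ahci_sw_activity_blink() above and ahci_led_store() below do. A minimal hedged usage sketch; set_led_on() is a hypothetical helper, not a driver function.

/*
 * Illustrative sketch only: compose a 4-byte "LED on" message for a
 * link and hand it to ahci_transmit_led_message() above.
 */
static ssize_t set_led_on(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	u32 state;

	state = ap->port_no | (link->pmp << 8);	/* addressing fields */
	state |= EM_MSG_LED_VALUE_ON;		/* LED value field */
	return ahci_transmit_led_message(ap, state, 4);
}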
1571
1572 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1573 {
1574 struct ahci_port_priv *pp = ap->private_data;
1575 struct ata_link *link;
1576 struct ahci_em_priv *emp;
1577 int rc = 0;
1578
1579 ata_for_each_link(link, ap, EDGE) {
1580 emp = &pp->em_priv[link->pmp];
1581 		rc += sprintf(buf + rc, "%lx\n", emp->led_state);
1582 }
1583 return rc;
1584 }
1585
1586 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1587 size_t size)
1588 {
1589 int state;
1590 int pmp;
1591 struct ahci_port_priv *pp = ap->private_data;
1592 struct ahci_em_priv *emp;
1593
1594 state = simple_strtoul(buf, NULL, 0);
1595
1596 /* get the slot number from the message */
1597 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1598 if (pmp < EM_MAX_SLOTS)
1599 emp = &pp->em_priv[pmp];
1600 else
1601 return -EINVAL;
1602
1603 /* mask off the activity bits if we are in sw_activity
1604 	 * mode; the user should turn off sw_activity before setting
1605 	 * the activity LED through em_message
1606 */
1607 if (emp->blink_policy)
1608 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1609
1610 return ahci_transmit_led_message(ap, state, size);
1611 }
1612
1613 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1614 {
1615 struct ata_link *link = dev->link;
1616 struct ata_port *ap = link->ap;
1617 struct ahci_port_priv *pp = ap->private_data;
1618 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1619 u32 port_led_state = emp->led_state;
1620
1621 /* save the desired Activity LED behavior */
1622 if (val == OFF) {
1623 /* clear LFLAG */
1624 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1625
1626 /* set the LED to OFF */
1627 port_led_state &= EM_MSG_LED_VALUE_OFF;
1628 port_led_state |= (ap->port_no | (link->pmp << 8));
1629 ahci_transmit_led_message(ap, port_led_state, 4);
1630 } else {
1631 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1632 if (val == BLINK_OFF) {
1633 /* set LED to ON for idle */
1634 port_led_state &= EM_MSG_LED_VALUE_OFF;
1635 port_led_state |= (ap->port_no | (link->pmp << 8));
1636 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1637 ahci_transmit_led_message(ap, port_led_state, 4);
1638 }
1639 }
1640 emp->blink_policy = val;
1641 return 0;
1642 }
1643
1644 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1645 {
1646 struct ata_link *link = dev->link;
1647 struct ata_port *ap = link->ap;
1648 struct ahci_port_priv *pp = ap->private_data;
1649 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1650
1651 /* display the saved value of activity behavior for this
1652 * disk.
1653 */
1654 return sprintf(buf, "%d\n", emp->blink_policy);
1655 }
1656
1657 static void ahci_port_init(struct device *dev, struct ata_port *ap,
1658 int port_no, void __iomem *mmio,
1659 void __iomem *port_mmio)
1660 {
1661 const char *emsg = NULL;
1662 int rc;
1663 u32 tmp;
1664
1665 /* make sure port is not active */
1666 rc = ahci_deinit_port(ap, &emsg);
1667 if (rc)
1668 dev_warn(dev, "%s (%d)\n", emsg, rc);
1669
1670 /* clear SError */
1671 tmp = readl(port_mmio + PORT_SCR_ERR);
1672 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1673 writel(tmp, port_mmio + PORT_SCR_ERR);
1674
1675 /* clear port IRQ */
1676 tmp = readl(port_mmio + PORT_IRQ_STAT);
1677 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1678 if (tmp)
1679 writel(tmp, port_mmio + PORT_IRQ_STAT);
1680
1681 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1682 }
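/*
 * PORT_SCR_ERR, PORT_IRQ_STAT and HOST_IRQ_STAT are write-one-to-clear,
 * which is why the value just read is written straight back above: it
 * acknowledges exactly the bits that were set without disturbing others.
 */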
1683
1684 static void ahci_init_controller(struct ata_host *host)
1685 {
1686 struct ahci_host_priv *hpriv = host->private_data;
1687 void __iomem *mmio = hpriv->mmio;
1688 int i;
1689 void __iomem *port_mmio;
1690 u32 tmp;
1691
1692 for (i = 0; i < host->n_ports; i++) {
1693 struct ata_port *ap = host->ports[i];
1694
1695 port_mmio = ahci_port_base(ap);
1696 if (ata_port_is_dummy(ap))
1697 continue;
1698
1699 ahci_port_init(host->dev, ap, i, mmio, port_mmio);
1700 }
1701
1702 tmp = readl(mmio + HOST_CTL);
1703 VPRINTK("HOST_CTL 0x%x\n", tmp);
1704 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1705 tmp = readl(mmio + HOST_CTL);
1706 VPRINTK("HOST_CTL 0x%x\n", tmp);
1707 }
1708
1709 static void ahci_pci_init_controller(struct ata_host *host)
1710 {
1711 struct ahci_host_priv *hpriv = host->private_data;
1712 struct pci_dev *pdev = to_pci_dev(host->dev);
1713 void __iomem *port_mmio;
1714 u32 tmp;
1715 int mv;
1716
1717 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1718 if (pdev->device == 0x6121)
1719 mv = 2;
1720 else
1721 mv = 4;
1722 port_mmio = __ahci_port_base(host, mv);
1723
1724 writel(0, port_mmio + PORT_IRQ_MASK);
1725
1726 /* clear port IRQ */
1727 tmp = readl(port_mmio + PORT_IRQ_STAT);
1728 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1729 if (tmp)
1730 writel(tmp, port_mmio + PORT_IRQ_STAT);
1731 }
1732
1733 ahci_init_controller(host);
1734 }
1735
1736 static void ahci_dev_config(struct ata_device *dev)
1737 {
1738 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1739
1740 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1741 dev->max_sectors = 255;
1742 ata_dev_printk(dev, KERN_INFO,
1743 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1744 }
1745 }
1746
1747 static unsigned int ahci_dev_classify(struct ata_port *ap)
1748 {
1749 void __iomem *port_mmio = ahci_port_base(ap);
1750 struct ata_taskfile tf;
1751 u32 tmp;
1752
1753 tmp = readl(port_mmio + PORT_SIG);
1754 tf.lbah = (tmp >> 24) & 0xff;
1755 tf.lbam = (tmp >> 16) & 0xff;
1756 tf.lbal = (tmp >> 8) & 0xff;
1757 tf.nsect = (tmp) & 0xff;
1758
1759 return ata_dev_classify(&tf);
1760 }
1761
1762 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1763 u32 opts)
1764 {
1765 dma_addr_t cmd_tbl_dma;
1766
1767 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1768
1769 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1770 pp->cmd_slot[tag].status = 0;
1771 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1772 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1773 }
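/*
 * The "(cmd_tbl_dma >> 16) >> 16" above is the usual idiom for taking the
 * high dword of an address without doing a single 32-bit shift, which
 * would be undefined behaviour when dma_addr_t is itself only 32 bits.
 */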
1774
1775 static int ahci_kick_engine(struct ata_port *ap)
1776 {
1777 void __iomem *port_mmio = ahci_port_base(ap);
1778 struct ahci_host_priv *hpriv = ap->host->private_data;
1779 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1780 u32 tmp;
1781 int busy, rc;
1782
1783 /* stop engine */
1784 rc = ahci_stop_engine(ap);
1785 if (rc)
1786 goto out_restart;
1787
1788 /* need to do CLO?
1789 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1790 */
1791 busy = status & (ATA_BUSY | ATA_DRQ);
1792 if (!busy && !sata_pmp_attached(ap)) {
1793 rc = 0;
1794 goto out_restart;
1795 }
1796
1797 if (!(hpriv->cap & HOST_CAP_CLO)) {
1798 rc = -EOPNOTSUPP;
1799 goto out_restart;
1800 }
1801
1802 /* perform CLO */
1803 tmp = readl(port_mmio + PORT_CMD);
1804 tmp |= PORT_CMD_CLO;
1805 writel(tmp, port_mmio + PORT_CMD);
1806
1807 rc = 0;
1808 tmp = ata_wait_register(port_mmio + PORT_CMD,
1809 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1810 if (tmp & PORT_CMD_CLO)
1811 rc = -EIO;
1812
1813 /* restart engine */
1814 out_restart:
1815 ahci_start_engine(ap);
1816 return rc;
1817 }
1818
1819 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1820 struct ata_taskfile *tf, int is_cmd, u16 flags,
1821 unsigned long timeout_msec)
1822 {
1823 const u32 cmd_fis_len = 5; /* five dwords */
1824 struct ahci_port_priv *pp = ap->private_data;
1825 void __iomem *port_mmio = ahci_port_base(ap);
1826 u8 *fis = pp->cmd_tbl;
1827 u32 tmp;
1828
1829 /* prep the command */
1830 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1831 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1832
1833 /* issue & wait */
1834 writel(1, port_mmio + PORT_CMD_ISSUE);
1835
1836 if (timeout_msec) {
1837 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1838 1, timeout_msec);
1839 if (tmp & 0x1) {
1840 ahci_kick_engine(ap);
1841 return -EBUSY;
1842 }
1843 } else
1844 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1845
1846 return 0;
1847 }
1848
1849 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1850 int pmp, unsigned long deadline,
1851 int (*check_ready)(struct ata_link *link))
1852 {
1853 struct ata_port *ap = link->ap;
1854 struct ahci_host_priv *hpriv = ap->host->private_data;
1855 const char *reason = NULL;
1856 unsigned long now, msecs;
1857 struct ata_taskfile tf;
1858 int rc;
1859
1860 DPRINTK("ENTER\n");
1861
1862 /* prepare for SRST (AHCI-1.1 10.4.1) */
1863 rc = ahci_kick_engine(ap);
1864 if (rc && rc != -EOPNOTSUPP)
1865 ata_link_printk(link, KERN_WARNING,
1866 "failed to reset engine (errno=%d)\n", rc);
1867
1868 ata_tf_init(link->device, &tf);
1869
1870 /* issue the first H2D Register FIS */
1871 msecs = 0;
1872 now = jiffies;
1873 if (time_after(deadline, now))
1874 msecs = jiffies_to_msecs(deadline - now);
1875
1876 tf.ctl |= ATA_SRST;
1877 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1878 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1879 rc = -EIO;
1880 reason = "1st FIS failed";
1881 goto fail;
1882 }
1883
1884 /* spec says at least 5us, but be generous and sleep for 1ms */
1885 msleep(1);
1886
1887 /* issue the second H2D Register FIS */
1888 tf.ctl &= ~ATA_SRST;
1889 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1890
1891 /* wait for link to become ready */
1892 rc = ata_wait_after_reset(link, deadline, check_ready);
1893 if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1894 /*
1895 * Workaround for cases where link online status can't
1896 * be trusted. Treat device readiness timeout as link
1897 * offline.
1898 */
1899 ata_link_printk(link, KERN_INFO,
1900 "device not ready, treating as offline\n");
1901 *class = ATA_DEV_NONE;
1902 } else if (rc) {
1903 /* link occupied, -ENODEV too is an error */
1904 reason = "device not ready";
1905 goto fail;
1906 } else
1907 *class = ahci_dev_classify(ap);
1908
1909 DPRINTK("EXIT, class=%u\n", *class);
1910 return 0;
1911
1912 fail:
1913 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1914 return rc;
1915 }
1916
1917 static int ahci_check_ready(struct ata_link *link)
1918 {
1919 void __iomem *port_mmio = ahci_port_base(link->ap);
1920 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1921
1922 return ata_check_ready(status);
1923 }
1924
1925 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1926 unsigned long deadline)
1927 {
1928 int pmp = sata_srst_pmp(link);
1929
1930 DPRINTK("ENTER\n");
1931
1932 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1933 }
1934
1935 static int ahci_sb600_check_ready(struct ata_link *link)
1936 {
1937 void __iomem *port_mmio = ahci_port_base(link->ap);
1938 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1939 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1940
1941 /*
1942 * There is no need to check TFDATA if BAD PMP is found due to HW bug,
1943 * which can save timeout delay.
1944 */
1945 if (irq_status & PORT_IRQ_BAD_PMP)
1946 return -EIO;
1947
1948 return ata_check_ready(status);
1949 }
1950
1951 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1952 unsigned long deadline)
1953 {
1954 struct ata_port *ap = link->ap;
1955 void __iomem *port_mmio = ahci_port_base(ap);
1956 int pmp = sata_srst_pmp(link);
1957 int rc;
1958 u32 irq_sts;
1959
1960 DPRINTK("ENTER\n");
1961
1962 rc = ahci_do_softreset(link, class, pmp, deadline,
1963 ahci_sb600_check_ready);
1964
1965 /*
1966 * Soft reset fails on some ATI chips with IPMS set when PMP
1967 * is enabled but SATA HDD/ODD is connected to SATA port,
1968 * do soft reset again to port 0.
1969 */
1970 if (rc == -EIO) {
1971 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1972 if (irq_sts & PORT_IRQ_BAD_PMP) {
1973 ata_link_printk(link, KERN_WARNING,
1974 "applying SB600 PMP SRST workaround "
1975 "and retrying\n");
1976 rc = ahci_do_softreset(link, class, 0, deadline,
1977 ahci_check_ready);
1978 }
1979 }
1980
1981 return rc;
1982 }
1983
1984 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1985 unsigned long deadline)
1986 {
1987 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1988 struct ata_port *ap = link->ap;
1989 struct ahci_port_priv *pp = ap->private_data;
1990 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1991 struct ata_taskfile tf;
1992 bool online;
1993 int rc;
1994
1995 DPRINTK("ENTER\n");
1996
1997 ahci_stop_engine(ap);
1998
1999 /* clear D2H reception area to properly wait for D2H FIS */
2000 ata_tf_init(link->device, &tf);
2001 tf.command = 0x80;
2002 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
2003
2004 rc = sata_link_hardreset(link, timing, deadline, &online,
2005 ahci_check_ready);
2006
2007 ahci_start_engine(ap);
2008
2009 if (online)
2010 *class = ahci_dev_classify(ap);
2011
2012 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
2013 return rc;
2014 }
2015
2016 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
2017 unsigned long deadline)
2018 {
2019 struct ata_port *ap = link->ap;
2020 bool online;
2021 int rc;
2022
2023 DPRINTK("ENTER\n");
2024
2025 ahci_stop_engine(ap);
2026
2027 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
2028 deadline, &online, NULL);
2029
2030 ahci_start_engine(ap);
2031
2032 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
2033
2034 /* vt8251 doesn't clear BSY on signature FIS reception,
2035 * request follow-up softreset.
2036 */
2037 return online ? -EAGAIN : rc;
2038 }
2039
2040 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
2041 unsigned long deadline)
2042 {
2043 struct ata_port *ap = link->ap;
2044 struct ahci_port_priv *pp = ap->private_data;
2045 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2046 struct ata_taskfile tf;
2047 bool online;
2048 int rc;
2049
2050 ahci_stop_engine(ap);
2051
2052 /* clear D2H reception area to properly wait for D2H FIS */
2053 ata_tf_init(link->device, &tf);
2054 tf.command = 0x80;
2055 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
2056
2057 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
2058 deadline, &online, NULL);
2059
2060 ahci_start_engine(ap);
2061
2062 /* The pseudo configuration device on SIMG4726 attached to
2063 * ASUS P5W-DH Deluxe doesn't send signature FIS after
2064 * hardreset if no device is attached to the first downstream
2065 * port && the pseudo device locks up on SRST w/ PMP==0. To
2066 * work around this, wait for !BSY only briefly. If BSY isn't
2067 * cleared, perform CLO and proceed to IDENTIFY (achieved by
2068 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
2069 *
2070 * Wait for two seconds. Devices attached to downstream port
2071 * which can't process the following IDENTIFY after this will
2072 * have to be reset again. For most cases, this should
2073 * suffice while keeping probing snappy enough.
2074 */
2075 if (online) {
2076 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
2077 ahci_check_ready);
2078 if (rc)
2079 ahci_kick_engine(ap);
2080 }
2081 return rc;
2082 }
2083
2084 static void ahci_postreset(struct ata_link *link, unsigned int *class)
2085 {
2086 struct ata_port *ap = link->ap;
2087 void __iomem *port_mmio = ahci_port_base(ap);
2088 u32 new_tmp, tmp;
2089
2090 ata_std_postreset(link, class);
2091
2092 /* Make sure port's ATAPI bit is set appropriately */
2093 new_tmp = tmp = readl(port_mmio + PORT_CMD);
2094 if (*class == ATA_DEV_ATAPI)
2095 new_tmp |= PORT_CMD_ATAPI;
2096 else
2097 new_tmp &= ~PORT_CMD_ATAPI;
2098 if (new_tmp != tmp) {
2099 writel(new_tmp, port_mmio + PORT_CMD);
2100 readl(port_mmio + PORT_CMD); /* flush */
2101 }
2102 }
2103
2104 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
2105 {
2106 struct scatterlist *sg;
2107 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
2108 unsigned int si;
2109
2110 VPRINTK("ENTER\n");
2111
2112 /*
2113 * Next, the S/G list.
2114 */
2115 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2116 dma_addr_t addr = sg_dma_address(sg);
2117 u32 sg_len = sg_dma_len(sg);
2118
2119 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
2120 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
2121 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
2122 }
2123
2124 return si;
2125 }
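/*
 * Each S/G entry built above stores its byte count minus one in
 * flags_size (a 4 KiB segment is encoded as 0xfff, for example) and
 * splits the DMA address into low/high dwords with the same
 * shift-by-16-twice idiom used in ahci_fill_cmd_slot().
 */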
2126
2127 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
2128 {
2129 struct ata_port *ap = qc->ap;
2130 struct ahci_port_priv *pp = ap->private_data;
2131
2132 if (!sata_pmp_attached(ap) || pp->fbs_enabled)
2133 return ata_std_qc_defer(qc);
2134 else
2135 return sata_pmp_qc_defer_cmd_switch(qc);
2136 }
2137
2138 static void ahci_qc_prep(struct ata_queued_cmd *qc)
2139 {
2140 struct ata_port *ap = qc->ap;
2141 struct ahci_port_priv *pp = ap->private_data;
2142 int is_atapi = ata_is_atapi(qc->tf.protocol);
2143 void *cmd_tbl;
2144 u32 opts;
2145 const u32 cmd_fis_len = 5; /* five dwords */
2146 unsigned int n_elem;
2147
2148 /*
2149 * Fill in command table information. First, the header,
2150 * a SATA Register - Host to Device command FIS.
2151 */
2152 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2153
2154 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
2155 if (is_atapi) {
2156 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2157 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2158 }
2159
2160 n_elem = 0;
2161 if (qc->flags & ATA_QCFLAG_DMAMAP)
2162 n_elem = ahci_fill_sg(qc, cmd_tbl);
2163
2164 /*
2165 * Fill in command slot information.
2166 */
2167 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
2168 if (qc->tf.flags & ATA_TFLAG_WRITE)
2169 opts |= AHCI_CMD_WRITE;
2170 if (is_atapi)
2171 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2172
2173 ahci_fill_cmd_slot(pp, qc->tag, opts);
2174 }
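/*
 * Layout of the per-tag command table filled in above, as implied by the
 * offsets used: the command FIS at the start, an ATAPI CDB (when present)
 * at AHCI_CMD_TBL_CDB, and the S/G entries from ahci_fill_sg() after the
 * table header.  The opts word built for ahci_fill_cmd_slot() packs the
 * FIS length in dwords, the PMP number (<< 12), the S/G entry count
 * (<< 16) and the write/ATAPI/prefetch flags.
 */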
2175
2176 static void ahci_fbs_dec_intr(struct ata_port *ap)
2177 {
2178 struct ahci_port_priv *pp = ap->private_data;
2179 void __iomem *port_mmio = ahci_port_base(ap);
2180 u32 fbs = readl(port_mmio + PORT_FBS);
2181 int retries = 3;
2182
2183 DPRINTK("ENTER\n");
2184 BUG_ON(!pp->fbs_enabled);
2185
2186 /* time to wait for DEC is not specified by AHCI spec,
2187 * add a retry loop for safety.
2188 */
2189 writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
2190 fbs = readl(port_mmio + PORT_FBS);
2191 while ((fbs & PORT_FBS_DEC) && retries--) {
2192 udelay(1);
2193 fbs = readl(port_mmio + PORT_FBS);
2194 }
2195
2196 if (fbs & PORT_FBS_DEC)
2197 dev_printk(KERN_ERR, ap->host->dev,
2198 "failed to clear device error\n");
2199 }
2200
2201 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
2202 {
2203 struct ahci_host_priv *hpriv = ap->host->private_data;
2204 struct ahci_port_priv *pp = ap->private_data;
2205 struct ata_eh_info *host_ehi = &ap->link.eh_info;
2206 struct ata_link *link = NULL;
2207 struct ata_queued_cmd *active_qc;
2208 struct ata_eh_info *active_ehi;
2209 bool fbs_need_dec = false;
2210 u32 serror;
2211
2212 /* determine active link with error */
2213 if (pp->fbs_enabled) {
2214 void __iomem *port_mmio = ahci_port_base(ap);
2215 u32 fbs = readl(port_mmio + PORT_FBS);
2216 int pmp = fbs >> PORT_FBS_DWE_OFFSET;
2217
2218 if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
2219 ata_link_online(&ap->pmp_link[pmp])) {
2220 link = &ap->pmp_link[pmp];
2221 fbs_need_dec = true;
2222 }
2223
2224 } else
2225 ata_for_each_link(link, ap, EDGE)
2226 if (ata_link_active(link))
2227 break;
2228
2229 if (!link)
2230 link = &ap->link;
2231
2232 active_qc = ata_qc_from_tag(ap, link->active_tag);
2233 active_ehi = &link->eh_info;
2234
2235 /* record irq stat */
2236 ata_ehi_clear_desc(host_ehi);
2237 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
2238
2239 /* AHCI needs SError cleared; otherwise, it might lock up */
2240 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
2241 ahci_scr_write(&ap->link, SCR_ERROR, serror);
2242 host_ehi->serror |= serror;
2243
2244 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
2245 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
2246 irq_stat &= ~PORT_IRQ_IF_ERR;
2247
2248 if (irq_stat & PORT_IRQ_TF_ERR) {
2249 /* If qc is active, charge it; otherwise, the active
2250 * link. There's no active qc on NCQ errors. It will
2251 * be determined by EH by reading log page 10h.
2252 */
2253 if (active_qc)
2254 active_qc->err_mask |= AC_ERR_DEV;
2255 else
2256 active_ehi->err_mask |= AC_ERR_DEV;
2257
2258 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2259 host_ehi->serror &= ~SERR_INTERNAL;
2260 }
2261
2262 if (irq_stat & PORT_IRQ_UNK_FIS) {
2263 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2264
2265 active_ehi->err_mask |= AC_ERR_HSM;
2266 active_ehi->action |= ATA_EH_RESET;
2267 ata_ehi_push_desc(active_ehi,
2268 "unknown FIS %08x %08x %08x %08x" ,
2269 unk[0], unk[1], unk[2], unk[3]);
2270 }
2271
2272 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2273 active_ehi->err_mask |= AC_ERR_HSM;
2274 active_ehi->action |= ATA_EH_RESET;
2275 ata_ehi_push_desc(active_ehi, "incorrect PMP");
2276 }
2277
2278 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2279 host_ehi->err_mask |= AC_ERR_HOST_BUS;
2280 host_ehi->action |= ATA_EH_RESET;
2281 ata_ehi_push_desc(host_ehi, "host bus error");
2282 }
2283
2284 if (irq_stat & PORT_IRQ_IF_ERR) {
2285 if (fbs_need_dec)
2286 active_ehi->err_mask |= AC_ERR_DEV;
2287 else {
2288 host_ehi->err_mask |= AC_ERR_ATA_BUS;
2289 host_ehi->action |= ATA_EH_RESET;
2290 }
2291
2292 ata_ehi_push_desc(host_ehi, "interface fatal error");
2293 }
2294
2295 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2296 ata_ehi_hotplugged(host_ehi);
2297 ata_ehi_push_desc(host_ehi, "%s",
2298 irq_stat & PORT_IRQ_CONNECT ?
2299 "connection status changed" : "PHY RDY changed");
2300 }
2301
2302 /* okay, let's hand over to EH */
2303
2304 if (irq_stat & PORT_IRQ_FREEZE)
2305 ata_port_freeze(ap);
2306 else if (fbs_need_dec) {
2307 ata_link_abort(link);
2308 ahci_fbs_dec_intr(ap);
2309 } else
2310 ata_port_abort(ap);
2311 }
2312
2313 static void ahci_port_intr(struct ata_port *ap)
2314 {
2315 void __iomem *port_mmio = ahci_port_base(ap);
2316 struct ata_eh_info *ehi = &ap->link.eh_info;
2317 struct ahci_port_priv *pp = ap->private_data;
2318 struct ahci_host_priv *hpriv = ap->host->private_data;
2319 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2320 u32 status, qc_active = 0;
2321 int rc;
2322
2323 status = readl(port_mmio + PORT_IRQ_STAT);
2324 writel(status, port_mmio + PORT_IRQ_STAT);
2325
2326 /* ignore BAD_PMP while resetting */
2327 if (unlikely(resetting))
2328 status &= ~PORT_IRQ_BAD_PMP;
2329
2330 /* If we are getting PhyRdy, this is
2331 * just a power state change; we should
2332 * clear it out, plus the PhyRdy/Comm
2333 * Wake bits, from SError
2334 */
2335 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2336 (status & PORT_IRQ_PHYRDY)) {
2337 status &= ~PORT_IRQ_PHYRDY;
2338 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2339 }
2340
2341 if (unlikely(status & PORT_IRQ_ERROR)) {
2342 ahci_error_intr(ap, status);
2343 return;
2344 }
2345
2346 if (status & PORT_IRQ_SDB_FIS) {
2347 /* If SNotification is available, leave notification
2348 * handling to sata_async_notification(). If not,
2349 * emulate it by snooping SDB FIS RX area.
2350 *
2351 * Snooping FIS RX area is probably cheaper than
2352 * poking SNotification but some controllers which
2353 * implement SNotification, ICH9 for example, don't
2354 * store AN SDB FIS into receive area.
2355 */
2356 if (hpriv->cap & HOST_CAP_SNTF)
2357 sata_async_notification(ap);
2358 else {
2359 /* If the 'N' bit in word 0 of the FIS is set,
2360 * we just received asynchronous notification.
2361 * Tell libata about it.
2362 *
2363 * Lack of SNotification should not appear in
2364 * ahci 1.2, so the workaround is unnecessary
2365 * when FBS is enabled.
2366 */
2367 if (pp->fbs_enabled)
2368 WARN_ON_ONCE(1);
2369 else {
2370 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2371 u32 f0 = le32_to_cpu(f[0]);
2372 if (f0 & (1 << 15))
2373 sata_async_notification(ap);
2374 }
2375 }
2376 }
2377
2378 /* pp->active_link is not reliable once FBS is enabled, both
2379 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
2380 * NCQ and non-NCQ commands may be in flight at the same time.
2381 */
2382 if (pp->fbs_enabled) {
2383 if (ap->qc_active) {
2384 qc_active = readl(port_mmio + PORT_SCR_ACT);
2385 qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
2386 }
2387 } else {
2388 /* pp->active_link is valid iff any command is in flight */
2389 if (ap->qc_active && pp->active_link->sactive)
2390 qc_active = readl(port_mmio + PORT_SCR_ACT);
2391 else
2392 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2393 }
2394
2395 rc = ata_qc_complete_multiple(ap, qc_active);
2396
2397 /* while resetting, invalid completions are expected */
2398 if (unlikely(rc < 0 && !resetting)) {
2399 ehi->err_mask |= AC_ERR_HSM;
2400 ehi->action |= ATA_EH_RESET;
2401 ata_port_freeze(ap);
2402 }
2403 }
2404
2405 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2406 {
2407 struct ata_host *host = dev_instance;
2408 struct ahci_host_priv *hpriv;
2409 unsigned int i, handled = 0;
2410 void __iomem *mmio;
2411 u32 irq_stat, irq_masked;
2412
2413 VPRINTK("ENTER\n");
2414
2415 hpriv = host->private_data;
2416 mmio = hpriv->mmio;
2417
2418 /* sigh. 0xffffffff is a valid return from h/w */
2419 irq_stat = readl(mmio + HOST_IRQ_STAT);
2420 if (!irq_stat)
2421 return IRQ_NONE;
2422
2423 irq_masked = irq_stat & hpriv->port_map;
2424
2425 spin_lock(&host->lock);
2426
2427 for (i = 0; i < host->n_ports; i++) {
2428 struct ata_port *ap;
2429
2430 if (!(irq_masked & (1 << i)))
2431 continue;
2432
2433 ap = host->ports[i];
2434 if (ap) {
2435 ahci_port_intr(ap);
2436 VPRINTK("port %u\n", i);
2437 } else {
2438 VPRINTK("port %u (no irq)\n", i);
2439 if (ata_ratelimit())
2440 dev_printk(KERN_WARNING, host->dev,
2441 "interrupt on disabled port %u\n", i);
2442 }
2443
2444 handled = 1;
2445 }
2446
2447 /* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
2448 * it should be cleared after all the port events are cleared;
2449 * otherwise, it will raise a spurious interrupt after each
2450 * valid one. Please read section 10.6.2 of ahci 1.1 for more
2451 * information.
2452 *
2453 * Also, use the unmasked value to clear interrupt as spurious
2454 * pending event on a dummy port might cause screaming IRQ.
2455 */
2456 writel(irq_stat, mmio + HOST_IRQ_STAT);
2457
2458 spin_unlock(&host->lock);
2459
2460 VPRINTK("EXIT\n");
2461
2462 return IRQ_RETVAL(handled);
2463 }
2464
2465 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2466 {
2467 struct ata_port *ap = qc->ap;
2468 void __iomem *port_mmio = ahci_port_base(ap);
2469 struct ahci_port_priv *pp = ap->private_data;
2470
2471 /* Keep track of the currently active link. It will be used
2472 * in completion path to determine whether NCQ phase is in
2473 * progress.
2474 */
2475 pp->active_link = qc->dev->link;
2476
2477 if (qc->tf.protocol == ATA_PROT_NCQ)
2478 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2479
2480 if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
2481 u32 fbs = readl(port_mmio + PORT_FBS);
2482 fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
2483 fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
2484 writel(fbs, port_mmio + PORT_FBS);
2485 pp->fbs_last_dev = qc->dev->link->pmp;
2486 }
2487
2488 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2489
2490 ahci_sw_activity(qc->dev->link);
2491
2492 return 0;
2493 }
2494
2495 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2496 {
2497 struct ahci_port_priv *pp = qc->ap->private_data;
2498 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2499
2500 if (pp->fbs_enabled)
2501 d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
2502
2503 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2504 return true;
2505 }
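/*
 * With FBS enabled each PMP device owns its own slice of the RX FIS area
 * (see ahci_port_start() below), so the D2H register FIS for this command
 * lives at pmp * AHCI_RX_FIS_SZ instead of the single shared offset.
 */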
2506
2507 static void ahci_freeze(struct ata_port *ap)
2508 {
2509 void __iomem *port_mmio = ahci_port_base(ap);
2510
2511 /* turn IRQ off */
2512 writel(0, port_mmio + PORT_IRQ_MASK);
2513 }
2514
2515 static void ahci_thaw(struct ata_port *ap)
2516 {
2517 struct ahci_host_priv *hpriv = ap->host->private_data;
2518 void __iomem *mmio = hpriv->mmio;
2519 void __iomem *port_mmio = ahci_port_base(ap);
2520 u32 tmp;
2521 struct ahci_port_priv *pp = ap->private_data;
2522
2523 /* clear IRQ */
2524 tmp = readl(port_mmio + PORT_IRQ_STAT);
2525 writel(tmp, port_mmio + PORT_IRQ_STAT);
2526 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2527
2528 /* turn IRQ back on */
2529 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2530 }
2531
2532 static void ahci_error_handler(struct ata_port *ap)
2533 {
2534 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2535 /* restart engine */
2536 ahci_stop_engine(ap);
2537 ahci_start_engine(ap);
2538 }
2539
2540 sata_pmp_error_handler(ap);
2541 }
2542
2543 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2544 {
2545 struct ata_port *ap = qc->ap;
2546
2547 /* make DMA engine forget about the failed command */
2548 if (qc->flags & ATA_QCFLAG_FAILED)
2549 ahci_kick_engine(ap);
2550 }
2551
2552 static void ahci_enable_fbs(struct ata_port *ap)
2553 {
2554 struct ahci_port_priv *pp = ap->private_data;
2555 void __iomem *port_mmio = ahci_port_base(ap);
2556 u32 fbs;
2557 int rc;
2558
2559 if (!pp->fbs_supported)
2560 return;
2561
2562 fbs = readl(port_mmio + PORT_FBS);
2563 if (fbs & PORT_FBS_EN) {
2564 pp->fbs_enabled = true;
2565 pp->fbs_last_dev = -1; /* initialization */
2566 return;
2567 }
2568
2569 rc = ahci_stop_engine(ap);
2570 if (rc)
2571 return;
2572
2573 writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
2574 fbs = readl(port_mmio + PORT_FBS);
2575 if (fbs & PORT_FBS_EN) {
2576 dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
2577 pp->fbs_enabled = true;
2578 pp->fbs_last_dev = -1; /* initialization */
2579 } else
2580 dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
2581
2582 ahci_start_engine(ap);
2583 }
2584
2585 static void ahci_disable_fbs(struct ata_port *ap)
2586 {
2587 struct ahci_port_priv *pp = ap->private_data;
2588 void __iomem *port_mmio = ahci_port_base(ap);
2589 u32 fbs;
2590 int rc;
2591
2592 if (!pp->fbs_supported)
2593 return;
2594
2595 fbs = readl(port_mmio + PORT_FBS);
2596 if ((fbs & PORT_FBS_EN) == 0) {
2597 pp->fbs_enabled = false;
2598 return;
2599 }
2600
2601 rc = ahci_stop_engine(ap);
2602 if (rc)
2603 return;
2604
2605 writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
2606 fbs = readl(port_mmio + PORT_FBS);
2607 if (fbs & PORT_FBS_EN)
2608 dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
2609 else {
2610 dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
2611 pp->fbs_enabled = false;
2612 }
2613
2614 ahci_start_engine(ap);
2615 }
2616
2617 static void ahci_pmp_attach(struct ata_port *ap)
2618 {
2619 void __iomem *port_mmio = ahci_port_base(ap);
2620 struct ahci_port_priv *pp = ap->private_data;
2621 u32 cmd;
2622
2623 cmd = readl(port_mmio + PORT_CMD);
2624 cmd |= PORT_CMD_PMP;
2625 writel(cmd, port_mmio + PORT_CMD);
2626
2627 ahci_enable_fbs(ap);
2628
2629 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2630 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2631 }
2632
2633 static void ahci_pmp_detach(struct ata_port *ap)
2634 {
2635 void __iomem *port_mmio = ahci_port_base(ap);
2636 struct ahci_port_priv *pp = ap->private_data;
2637 u32 cmd;
2638
2639 ahci_disable_fbs(ap);
2640
2641 cmd = readl(port_mmio + PORT_CMD);
2642 cmd &= ~PORT_CMD_PMP;
2643 writel(cmd, port_mmio + PORT_CMD);
2644
2645 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2646 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2647 }
2648
2649 static int ahci_port_resume(struct ata_port *ap)
2650 {
2651 ahci_power_up(ap);
2652 ahci_start_port(ap);
2653
2654 if (sata_pmp_attached(ap))
2655 ahci_pmp_attach(ap);
2656 else
2657 ahci_pmp_detach(ap);
2658
2659 return 0;
2660 }
2661
2662 #ifdef CONFIG_PM
2663 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2664 {
2665 const char *emsg = NULL;
2666 int rc;
2667
2668 rc = ahci_deinit_port(ap, &emsg);
2669 if (rc == 0)
2670 ahci_power_down(ap);
2671 else {
2672 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2673 ahci_start_port(ap);
2674 }
2675
2676 return rc;
2677 }
2678
2679 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2680 {
2681 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2682 struct ahci_host_priv *hpriv = host->private_data;
2683 void __iomem *mmio = hpriv->mmio;
2684 u32 ctl;
2685
2686 if (mesg.event & PM_EVENT_SUSPEND &&
2687 hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
2688 dev_printk(KERN_ERR, &pdev->dev,
2689 "BIOS update required for suspend/resume\n");
2690 return -EIO;
2691 }
2692
2693 if (mesg.event & PM_EVENT_SLEEP) {
2694 /* AHCI spec rev1.1 section 8.3.3:
2695 * Software must disable interrupts prior to requesting a
2696 * transition of the HBA to D3 state.
2697 */
2698 ctl = readl(mmio + HOST_CTL);
2699 ctl &= ~HOST_IRQ_EN;
2700 writel(ctl, mmio + HOST_CTL);
2701 readl(mmio + HOST_CTL); /* flush */
2702 }
2703
2704 return ata_pci_device_suspend(pdev, mesg);
2705 }
2706
2707 static int ahci_pci_device_resume(struct pci_dev *pdev)
2708 {
2709 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2710 int rc;
2711
2712 rc = ata_pci_device_do_resume(pdev);
2713 if (rc)
2714 return rc;
2715
2716 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2717 rc = ahci_pci_reset_controller(host);
2718 if (rc)
2719 return rc;
2720
2721 ahci_pci_init_controller(host);
2722 }
2723
2724 ata_host_resume(host);
2725
2726 return 0;
2727 }
2728 #endif
2729
2730 static int ahci_port_start(struct ata_port *ap)
2731 {
2732 struct ahci_host_priv *hpriv = ap->host->private_data;
2733 struct device *dev = ap->host->dev;
2734 struct ahci_port_priv *pp;
2735 void *mem;
2736 dma_addr_t mem_dma;
2737 size_t dma_sz, rx_fis_sz;
2738
2739 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2740 if (!pp)
2741 return -ENOMEM;
2742
2743 /* check FBS capability */
2744 if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
2745 void __iomem *port_mmio = ahci_port_base(ap);
2746 u32 cmd = readl(port_mmio + PORT_CMD);
2747 if (cmd & PORT_CMD_FBSCP)
2748 pp->fbs_supported = true;
2749 else
2750 dev_printk(KERN_WARNING, dev,
2751 "The port is not capable of FBS\n");
2752 }
2753
2754 if (pp->fbs_supported) {
2755 dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
2756 rx_fis_sz = AHCI_RX_FIS_SZ * 16;
2757 } else {
2758 dma_sz = AHCI_PORT_PRIV_DMA_SZ;
2759 rx_fis_sz = AHCI_RX_FIS_SZ;
2760 }
2761
2762 mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
2763 if (!mem)
2764 return -ENOMEM;
2765 memset(mem, 0, dma_sz);
2766
2767 /*
2768 * First item in chunk of DMA memory: 32-slot command table,
2769 * 32 bytes each in size
2770 */
2771 pp->cmd_slot = mem;
2772 pp->cmd_slot_dma = mem_dma;
2773
2774 mem += AHCI_CMD_SLOT_SZ;
2775 mem_dma += AHCI_CMD_SLOT_SZ;
2776
2777 /*
2778 * Second item: Received-FIS area
2779 */
2780 pp->rx_fis = mem;
2781 pp->rx_fis_dma = mem_dma;
2782
2783 mem += rx_fis_sz;
2784 mem_dma += rx_fis_sz;
2785
2786 /*
2787 * Third item: data area for the per-tag command tables
2788 * (one per command slot) and their scatter-gather tables
2789 */
2790 pp->cmd_tbl = mem;
2791 pp->cmd_tbl_dma = mem_dma;
2792
2793 /*
2794 * Save off initial list of interrupts to be enabled.
2795 * This could be changed later
2796 */
2797 pp->intr_mask = DEF_PORT_IRQ;
2798
2799 ap->private_data = pp;
2800
2801 /* engage engines, captain */
2802 return ahci_port_resume(ap);
2803 }
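/*
 * Resulting layout of the per-port DMA block set up above: the command
 * header slots come first, then the received-FIS area (16 copies when FBS
 * is in use so each PMP device gets its own), and finally the command
 * tables indexed by tag which ahci_qc_prep() fills in.
 */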
2804
2805 static void ahci_port_stop(struct ata_port *ap)
2806 {
2807 const char *emsg = NULL;
2808 int rc;
2809
2810 /* de-initialize port */
2811 rc = ahci_deinit_port(ap, &emsg);
2812 if (rc)
2813 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2814 }
2815
2816 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2817 {
2818 int rc;
2819
2820 if (using_dac &&
2821 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2822 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2823 if (rc) {
2824 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2825 if (rc) {
2826 dev_printk(KERN_ERR, &pdev->dev,
2827 "64-bit DMA enable failed\n");
2828 return rc;
2829 }
2830 }
2831 } else {
2832 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2833 if (rc) {
2834 dev_printk(KERN_ERR, &pdev->dev,
2835 "32-bit DMA enable failed\n");
2836 return rc;
2837 }
2838 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2839 if (rc) {
2840 dev_printk(KERN_ERR, &pdev->dev,
2841 "32-bit consistent DMA enable failed\n");
2842 return rc;
2843 }
2844 }
2845 return 0;
2846 }
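/*
 * This open-codes the usual fallback: try 64-bit streaming and coherent
 * masks when the HBA advertises 64-bit addressing, otherwise settle for
 * 32-bit.  On later kernels the equivalent would typically use the
 * combined helper, roughly (sketch for comparison only):
 *
 *	rc = dma_set_mask_and_coherent(&pdev->dev,
 *				       DMA_BIT_MASK(using_dac ? 64 : 32));
 *	if (rc && using_dac)
 *		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 */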
2847
2848 static void ahci_print_info(struct ata_host *host, const char *scc_s)
2849 {
2850 struct ahci_host_priv *hpriv = host->private_data;
2851 void __iomem *mmio = hpriv->mmio;
2852 u32 vers, cap, cap2, impl, speed;
2853 const char *speed_s;
2854
2855 vers = readl(mmio + HOST_VERSION);
2856 cap = hpriv->cap;
2857 cap2 = hpriv->cap2;
2858 impl = hpriv->port_map;
2859
2860 speed = (cap >> 20) & 0xf;
2861 if (speed == 1)
2862 speed_s = "1.5";
2863 else if (speed == 2)
2864 speed_s = "3";
2865 else if (speed == 3)
2866 speed_s = "6";
2867 else
2868 speed_s = "?";
2869
2870 dev_info(host->dev,
2871 "AHCI %02x%02x.%02x%02x "
2872 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2873 ,
2874
2875 (vers >> 24) & 0xff,
2876 (vers >> 16) & 0xff,
2877 (vers >> 8) & 0xff,
2878 vers & 0xff,
2879
2880 ((cap >> 8) & 0x1f) + 1,
2881 (cap & 0x1f) + 1,
2882 speed_s,
2883 impl,
2884 scc_s);
2885
2886 dev_info(host->dev,
2887 "flags: "
2888 "%s%s%s%s%s%s%s"
2889 "%s%s%s%s%s%s%s"
2890 "%s%s%s%s%s%s\n"
2891 ,
2892
2893 cap & HOST_CAP_64 ? "64bit " : "",
2894 cap & HOST_CAP_NCQ ? "ncq " : "",
2895 cap & HOST_CAP_SNTF ? "sntf " : "",
2896 cap & HOST_CAP_MPS ? "ilck " : "",
2897 cap & HOST_CAP_SSS ? "stag " : "",
2898 cap & HOST_CAP_ALPM ? "pm " : "",
2899 cap & HOST_CAP_LED ? "led " : "",
2900 cap & HOST_CAP_CLO ? "clo " : "",
2901 cap & HOST_CAP_ONLY ? "only " : "",
2902 cap & HOST_CAP_PMP ? "pmp " : "",
2903 cap & HOST_CAP_FBS ? "fbs " : "",
2904 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2905 cap & HOST_CAP_SSC ? "slum " : "",
2906 cap & HOST_CAP_PART ? "part " : "",
2907 cap & HOST_CAP_CCC ? "ccc " : "",
2908 cap & HOST_CAP_EMS ? "ems " : "",
2909 cap & HOST_CAP_SXS ? "sxs " : "",
2910 cap2 & HOST_CAP2_APST ? "apst " : "",
2911 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2912 cap2 & HOST_CAP2_BOH ? "boh " : ""
2913 );
2914 }
2915
2916 static void ahci_pci_print_info(struct ata_host *host)
2917 {
2918 struct pci_dev *pdev = to_pci_dev(host->dev);
2919 u16 cc;
2920 const char *scc_s;
2921
2922 pci_read_config_word(pdev, 0x0a, &cc);
2923 if (cc == PCI_CLASS_STORAGE_IDE)
2924 scc_s = "IDE";
2925 else if (cc == PCI_CLASS_STORAGE_SATA)
2926 scc_s = "SATA";
2927 else if (cc == PCI_CLASS_STORAGE_RAID)
2928 scc_s = "RAID";
2929 else
2930 scc_s = "unknown";
2931
2932 ahci_print_info(host, scc_s);
2933 }
2934
2935 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2936 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2937 * support PMP and the 4726 either directly exports the device
2938 * attached to the first downstream port or acts as a hardware storage
2939 * controller and emulates a single ATA device (can be RAID 0/1 or some
2940 * other configuration).
2941 *
2942 * When there's no device attached to the first downstream port of the
2943 * 4726, "Config Disk" appears, which is a pseudo ATA device to
2944 * configure the 4726. However, ATA emulation of the device is very
2945 * lame. It doesn't send signature D2H Reg FIS after the initial
2946 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2947 *
2948 * The following function works around the problem by always using
2949 * hardreset on the port and not depending on receiving signature FIS
2950 * afterward. If signature FIS isn't received soon, ATA class is
2951 * assumed without follow-up softreset.
2952 */
2953 static void ahci_p5wdh_workaround(struct ata_host *host)
2954 {
2955 static struct dmi_system_id sysids[] = {
2956 {
2957 .ident = "P5W DH Deluxe",
2958 .matches = {
2959 DMI_MATCH(DMI_SYS_VENDOR,
2960 "ASUSTEK COMPUTER INC"),
2961 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2962 },
2963 },
2964 { }
2965 };
2966 struct pci_dev *pdev = to_pci_dev(host->dev);
2967
2968 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2969 dmi_check_system(sysids)) {
2970 struct ata_port *ap = host->ports[1];
2971
2972 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2973 "Deluxe on-board SIMG4726 workaround\n");
2974
2975 ap->ops = &ahci_p5wdh_ops;
2976 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2977 }
2978 }
2979
2980 /* only some SB600 ahci controllers can do 64bit DMA */
2981 static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
2982 {
2983 static const struct dmi_system_id sysids[] = {
2984 /*
2985 * The oldest version known to be broken is 0901 and the
2986 * oldest known to work is 1501, which was released on 2007-10-26.
2987 * Enable 64bit DMA on 1501 and anything newer.
2988 *
2989 * Please read bko#9412 for more info.
2990 */
2991 {
2992 .ident = "ASUS M2A-VM",
2993 .matches = {
2994 DMI_MATCH(DMI_BOARD_VENDOR,
2995 "ASUSTeK Computer INC."),
2996 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2997 },
2998 .driver_data = "20071026", /* yyyymmdd */
2999 },
3000 /*
3001 * All BIOS versions for the MSI K9A2 Platinum (MS-7376)
3002 * support 64bit DMA.
3003 *
3004 * BIOS versions earlier than 1.5 had the Manufacturer DMI
3005 * fields as "MICRO-STAR INTERANTIONAL CO.,LTD".
3006 * This spelling mistake was fixed in BIOS version 1.5, so
3007 * 1.5 and later have the Manufacturer as
3008 * "MICRO-STAR INTERNATIONAL CO.,LTD".
3009 * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER".
3010 *
3011 * BIOS versions earlier than 1.9 had a Board Product Name
3012 * DMI field of "MS-7376". This was changed to be
3013 * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still
3014 * match on DMI_BOARD_NAME of "MS-7376".
3015 */
3016 {
3017 .ident = "MSI K9A2 Platinum",
3018 .matches = {
3019 DMI_MATCH(DMI_BOARD_VENDOR,
3020 "MICRO-STAR INTER"),
3021 DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
3022 },
3023 },
3024 { }
3025 };
3026 const struct dmi_system_id *match;
3027 int year, month, date;
3028 char buf[9];
3029
3030 match = dmi_first_match(sysids);
3031 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
3032 !match)
3033 return false;
3034
3035 if (!match->driver_data)
3036 goto enable_64bit;
3037
3038 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
3039 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
3040
3041 if (strcmp(buf, match->driver_data) >= 0)
3042 goto enable_64bit;
3043 else {
3044 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
3045 "forcing 32bit DMA, update BIOS\n", match->ident);
3046 return false;
3047 }
3048
3049 enable_64bit:
3050 dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
3051 match->ident);
3052 return true;
3053 }
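/*
 * The BIOS date is rebuilt as a zero-padded "yyyymmdd" string, so the
 * plain strcmp() above (and the one in ahci_broken_suspend() below)
 * orders dates chronologically without any date arithmetic.
 */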
3054
3055 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
3056 {
3057 static const struct dmi_system_id broken_systems[] = {
3058 {
3059 .ident = "HP Compaq nx6310",
3060 .matches = {
3061 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3062 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
3063 },
3064 /* PCI slot number of the controller */
3065 .driver_data = (void *)0x1FUL,
3066 },
3067 {
3068 .ident = "HP Compaq 6720s",
3069 .matches = {
3070 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3071 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
3072 },
3073 /* PCI slot number of the controller */
3074 .driver_data = (void *)0x1FUL,
3075 },
3076
3077 { } /* terminate list */
3078 };
3079 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
3080
3081 if (dmi) {
3082 unsigned long slot = (unsigned long)dmi->driver_data;
3083 /* apply the quirk only to on-board controllers */
3084 return slot == PCI_SLOT(pdev->devfn);
3085 }
3086
3087 return false;
3088 }
3089
3090 static bool ahci_broken_suspend(struct pci_dev *pdev)
3091 {
3092 static const struct dmi_system_id sysids[] = {
3093 /*
3094 * On HP dv[4-6] and HDX18 with earlier BIOSen, the link
3095 * to the hard disk doesn't come online after
3096 * resuming from STR. Warn and fail suspend.
3097 *
3098 * http://bugzilla.kernel.org/show_bug.cgi?id=12276
3099 *
3100 * Use dates instead of versions to match as HP is
3101 * apparently recycling both product and version
3102 * strings.
3103 *
3104 * http://bugzilla.kernel.org/show_bug.cgi?id=15462
3105 */
3106 {
3107 .ident = "dv4",
3108 .matches = {
3109 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3110 DMI_MATCH(DMI_PRODUCT_NAME,
3111 "HP Pavilion dv4 Notebook PC"),
3112 },
3113 .driver_data = "20090105", /* F.30 */
3114 },
3115 {
3116 .ident = "dv5",
3117 .matches = {
3118 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3119 DMI_MATCH(DMI_PRODUCT_NAME,
3120 "HP Pavilion dv5 Notebook PC"),
3121 },
3122 .driver_data = "20090506", /* F.16 */
3123 },
3124 {
3125 .ident = "dv6",
3126 .matches = {
3127 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3128 DMI_MATCH(DMI_PRODUCT_NAME,
3129 "HP Pavilion dv6 Notebook PC"),
3130 },
3131 .driver_data = "20090423", /* F.21 */
3132 },
3133 {
3134 .ident = "HDX18",
3135 .matches = {
3136 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3137 DMI_MATCH(DMI_PRODUCT_NAME,
3138 "HP HDX18 Notebook PC"),
3139 },
3140 .driver_data = "20090430", /* F.23 */
3141 },
3142 /*
3143 * Acer eMachines G725 has the same problem. BIOS
3144 * V1.03 is known to be broken. V3.04 is known to
3145 * work. In between, there are V1.06, V2.06 and V3.03
3146 * that we don't have much idea about. For now,
3147 * blacklist anything older than V3.04.
3148 *
3149 * http://bugzilla.kernel.org/show_bug.cgi?id=15104
3150 */
3151 {
3152 .ident = "G725",
3153 .matches = {
3154 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
3155 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
3156 },
3157 .driver_data = "20091216", /* V3.04 */
3158 },
3159 { } /* terminate list */
3160 };
3161 const struct dmi_system_id *dmi = dmi_first_match(sysids);
3162 int year, month, date;
3163 char buf[9];
3164
3165 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
3166 return false;
3167
3168 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
3169 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
3170
3171 return strcmp(buf, dmi->driver_data) < 0;
3172 }
3173
3174 static bool ahci_broken_online(struct pci_dev *pdev)
3175 {
3176 #define ENCODE_BUSDEVFN(bus, slot, func) \
3177 (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
3178 static const struct dmi_system_id sysids[] = {
3179 /*
3180 * There are several Gigabyte boards which use
3181 * SIMG5723s configured as hardware RAID. Certain
3182 * 5723 firmware revisions shipped there keep the link
3183 * online but fail to answer properly to SRST or
3184 * IDENTIFY when no device is attached downstream,
3185 * causing libata to retry quite a few times and leading
3186 * to excessive detection delay.
3187 *
3188 * As these firmwares respond to the second reset try
3189 * with invalid device signature, considering unknown
3190 * sig as offline works around the problem acceptably.
3191 */
3192 {
3193 .ident = "EP45-DQ6",
3194 .matches = {
3195 DMI_MATCH(DMI_BOARD_VENDOR,
3196 "Gigabyte Technology Co., Ltd."),
3197 DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
3198 },
3199 .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
3200 },
3201 {
3202 .ident = "EP45-DS5",
3203 .matches = {
3204 DMI_MATCH(DMI_BOARD_VENDOR,
3205 "Gigabyte Technology Co., Ltd."),
3206 DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
3207 },
3208 .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
3209 },
3210 { } /* terminate list */
3211 };
3212 #undef ENCODE_BUSDEVFN
3213 const struct dmi_system_id *dmi = dmi_first_match(sysids);
3214 unsigned int val;
3215
3216 if (!dmi)
3217 return false;
3218
3219 val = (unsigned long)dmi->driver_data;
3220
3221 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
3222 }
3223
3224 #ifdef CONFIG_ATA_ACPI
3225 static void ahci_gtf_filter_workaround(struct ata_host *host)
3226 {
3227 static const struct dmi_system_id sysids[] = {
3228 /*
3229 * Aspire 3810T issues a bunch of SATA enable commands
3230 * via _GTF including an invalid one and one which is
3231 * rejected by the device. Among the successful ones
3232 * is FPDMA non-zero offset enable which when enabled
3233 * only on the drive side leads to NCQ command
3234 * failures. Filter it out.
3235 */
3236 {
3237 .ident = "Aspire 3810T",
3238 .matches = {
3239 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3240 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
3241 },
3242 .driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
3243 },
3244 { }
3245 };
3246 const struct dmi_system_id *dmi = dmi_first_match(sysids);
3247 unsigned int filter;
3248 int i;
3249
3250 if (!dmi)
3251 return;
3252
3253 filter = (unsigned long)dmi->driver_data;
3254 dev_printk(KERN_INFO, host->dev,
3255 "applying extra ACPI _GTF filter 0x%x for %s\n",
3256 filter, dmi->ident);
3257
3258 for (i = 0; i < host->n_ports; i++) {
3259 struct ata_port *ap = host->ports[i];
3260 struct ata_link *link;
3261 struct ata_device *dev;
3262
3263 ata_for_each_link(link, ap, EDGE)
3264 ata_for_each_dev(dev, link, ALL)
3265 dev->gtf_filter |= filter;
3266 }
3267 }
3268 #else
3269 static inline void ahci_gtf_filter_workaround(struct ata_host *host)
3270 {}
3271 #endif
3272
3273 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3274 {
3275 static int printed_version;
3276 unsigned int board_id = ent->driver_data;
3277 struct ata_port_info pi = ahci_port_info[board_id];
3278 const struct ata_port_info *ppi[] = { &pi, NULL };
3279 struct device *dev = &pdev->dev;
3280 struct ahci_host_priv *hpriv;
3281 struct ata_host *host;
3282 int n_ports, i, rc;
3283
3284 VPRINTK("ENTER\n");
3285
3286 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
3287
3288 if (!printed_version++)
3289 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
3290
3291 /* The AHCI driver can only drive the SATA ports; the PATA driver
3292 can drive them all, so if both drivers are selected make sure
3293 AHCI stays out of the way */
3294 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
3295 return -ENODEV;
3296
3297 /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
3298 * At the moment, we can only use the AHCI mode. Let the users know
3299 * that for SAS drives they're out of luck.
3300 */
3301 if (pdev->vendor == PCI_VENDOR_ID_PROMISE)
3302 dev_printk(KERN_INFO, &pdev->dev, "PDC42819 "
3303 "can only drive SATA devices with this driver\n");
3304
3305 /* acquire resources */
3306 rc = pcim_enable_device(pdev);
3307 if (rc)
3308 return rc;
3309
3310 /* AHCI controllers often implement an SFF compatible interface.
3311 * Grab all PCI BARs just in case.
3312 */
3313 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
3314 if (rc == -EBUSY)
3315 pcim_pin_device(pdev);
3316 if (rc)
3317 return rc;
3318
3319 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3320 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
3321 u8 map;
3322
3323 /* ICH6s share the same PCI ID for both piix and ahci
3324 * modes. Enabling ahci mode while MAP indicates
3325 * combined mode is a bad idea. Yield to ata_piix.
3326 */
3327 pci_read_config_byte(pdev, ICH_MAP, &map);
3328 if (map & 0x3) {
3329 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
3330 "combined mode, can't enable AHCI mode\n");
3331 return -ENODEV;
3332 }
3333 }
3334
3335 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
3336 if (!hpriv)
3337 return -ENOMEM;
3338 hpriv->flags |= (unsigned long)pi.private_data;
3339
3340 /* MCP65 revision A1 and A2 can't do MSI */
3341 if (board_id == board_ahci_mcp65 &&
3342 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
3343 hpriv->flags |= AHCI_HFLAG_NO_MSI;
3344
3345 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
3346 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
3347 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
3348
3349 /* only some SB600s can do 64bit DMA */
3350 if (ahci_sb600_enable_64bit(pdev))
3351 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
3352
3353 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
3354 pci_intx(pdev, 1);
3355
3356 hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
3357
3358 /* save initial config */
3359 ahci_pci_save_initial_config(pdev, hpriv);
3360
3361 /* prepare host */
3362 if (hpriv->cap & HOST_CAP_NCQ) {
3363 pi.flags |= ATA_FLAG_NCQ;
3364 /* Auto-activate optimization is supposed to be supported on
3365 all AHCI controllers indicating NCQ support, but it seems
3366 to be broken at least on some NVIDIA MCP79 chipsets.
3367 Until we get info on which NVIDIA chipsets don't have this
3368 issue, if any, disable AA on all NVIDIA AHCIs. */
3369 if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
3370 pi.flags |= ATA_FLAG_FPDMA_AA;
3371 }
3372
3373 if (hpriv->cap & HOST_CAP_PMP)
3374 pi.flags |= ATA_FLAG_PMP;
3375
3376 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
3377 u8 messages;
3378 void __iomem *mmio = hpriv->mmio;
3379 u32 em_loc = readl(mmio + HOST_EM_LOC);
3380 u32 em_ctl = readl(mmio + HOST_EM_CTL);
3381
3382 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
3383
3384 /* we only support LED message type right now */
3385 if ((messages & 0x01) && (ahci_em_messages == 1)) {
3386 /* store em_loc */
3387 hpriv->em_loc = ((em_loc >> 16) * 4);
3388 pi.flags |= ATA_FLAG_EM;
3389 if (!(em_ctl & EM_CTL_ALHD))
3390 pi.flags |= ATA_FLAG_SW_ACTIVITY;
3391 }
3392 }
3393
3394 if (ahci_broken_system_poweroff(pdev)) {
3395 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
3396 dev_info(&pdev->dev,
3397 "quirky BIOS, skipping spindown on poweroff\n");
3398 }
3399
3400 if (ahci_broken_suspend(pdev)) {
3401 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
3402 dev_printk(KERN_WARNING, &pdev->dev,
3403 "BIOS update required for suspend/resume\n");
3404 }
3405
3406 if (ahci_broken_online(pdev)) {
3407 hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
3408 dev_info(&pdev->dev,
3409 "online status unreliable, applying workaround\n");
3410 }
3411
3412 /* CAP.NP sometimes indicates the index of the last enabled
3413 * port, at other times, that of the last possible port, so
3414 * determining the maximum port number requires looking at
3415 * both CAP.NP and port_map.
3416 */
3417 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
3418
3419 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3420 if (!host)
3421 return -ENOMEM;
3422 host->private_data = hpriv;
3423
3424 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
3425 host->flags |= ATA_HOST_PARALLEL_SCAN;
3426 else
3427 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
3428
3429 if (pi.flags & ATA_FLAG_EM)
3430 ahci_reset_em(host);
3431
3432 for (i = 0; i < host->n_ports; i++) {
3433 struct ata_port *ap = host->ports[i];
3434
3435 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
3436 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
3437 0x100 + ap->port_no * 0x80, "port");
3438
3439 /* set initial link pm policy */
3440 ap->pm_policy = NOT_AVAILABLE;
3441
3442 /* set enclosure management message type */
3443 if (ap->flags & ATA_FLAG_EM)
3444 ap->em_message_type = ahci_em_messages;
3445
3446
3447 /* disabled/not-implemented port */
3448 if (!(hpriv->port_map & (1 << i)))
3449 ap->ops = &ata_dummy_port_ops;
3450 }
3451
3452 /* apply workaround for ASUS P5W DH Deluxe mainboard */
3453 ahci_p5wdh_workaround(host);
3454
3455 /* apply gtf filter quirk */
3456 ahci_gtf_filter_workaround(host);
3457
3458 /* initialize adapter */
3459 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3460 if (rc)
3461 return rc;
3462
3463 rc = ahci_pci_reset_controller(host);
3464 if (rc)
3465 return rc;
3466
3467 ahci_pci_init_controller(host);
3468 ahci_pci_print_info(host);
3469
3470 pci_set_master(pdev);
3471 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
3472 &ahci_sht);
3473 }
3474
3475 static int __init ahci_init(void)
3476 {
3477 return pci_register_driver(&ahci_pci_driver);
3478 }
3479
3480 static void __exit ahci_exit(void)
3481 {
3482 pci_unregister_driver(&ahci_pci_driver);
3483 }
3484
3485
3486 MODULE_AUTHOR("Jeff Garzik");
3487 MODULE_DESCRIPTION("AHCI SATA low-level driver");
3488 MODULE_LICENSE("GPL");
3489 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
3490 MODULE_VERSION(DRV_VERSION);
3491
3492 module_init(ahci_init);
3493 module_exit(ahci_exit);