deliverable/linux.git: drivers/ata/ahci.c (commit c95015986c940d4dcbeb3c82e76b5687a15584e6)
1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48
49 #define DRV_NAME "ahci"
50 #define DRV_VERSION "3.0"
51
52 /* Enclosure Management Control */
53 #define EM_CTRL_MSG_TYPE 0x000f0000
54
55 /* Enclosure Management LED Message Type */
56 #define EM_MSG_LED_HBA_PORT 0x0000000f
57 #define EM_MSG_LED_PMP_SLOT 0x0000ff00
58 #define EM_MSG_LED_VALUE 0xffff0000
59 #define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
60 #define EM_MSG_LED_VALUE_OFF 0xfff80000
61 #define EM_MSG_LED_VALUE_ON 0x00010000
62
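/*
 * Layout of an enclosure management LED message word: bits 0-3 select
 * the HBA port, bits 8-15 the PMP slot, and bits 16-31 carry the LED
 * value (bits 16-18 form the activity field used for software activity
 * blinking).
 */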
63 static int ahci_skip_host_reset;
64 static int ahci_ignore_sss;
65
66 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
67 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
68
69 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
70 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
71
72 static int ahci_enable_alpm(struct ata_port *ap,
73 enum link_pm policy);
74 static void ahci_disable_alpm(struct ata_port *ap);
75 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
76 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
77 size_t size);
78 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
79 ssize_t size);
80
81 enum {
82 AHCI_PCI_BAR = 5,
83 AHCI_MAX_PORTS = 32,
84 AHCI_MAX_SG = 168, /* hardware max is 64K */
85 AHCI_DMA_BOUNDARY = 0xffffffff,
86 AHCI_MAX_CMDS = 32,
87 AHCI_CMD_SZ = 32,
88 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
89 AHCI_RX_FIS_SZ = 256,
90 AHCI_CMD_TBL_CDB = 0x40,
91 AHCI_CMD_TBL_HDR_SZ = 0x80,
92 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
93 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
94 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
95 AHCI_RX_FIS_SZ,
96 AHCI_IRQ_ON_SG = (1 << 31),
97 AHCI_CMD_ATAPI = (1 << 5),
98 AHCI_CMD_WRITE = (1 << 6),
99 AHCI_CMD_PREFETCH = (1 << 7),
100 AHCI_CMD_RESET = (1 << 8),
101 AHCI_CMD_CLR_BUSY = (1 << 10),
102
103 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
104 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
105 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
106
107 board_ahci = 0,
108 board_ahci_vt8251 = 1,
109 board_ahci_ign_iferr = 2,
110 board_ahci_sb600 = 3,
111 board_ahci_mv = 4,
112 board_ahci_sb700 = 5, /* for SB700 and SB800 */
113 board_ahci_mcp65 = 6,
114 board_ahci_nopmp = 7,
115 board_ahci_yesncq = 8,
116
117 /* global controller registers */
118 HOST_CAP = 0x00, /* host capabilities */
119 HOST_CTL = 0x04, /* global host control */
120 HOST_IRQ_STAT = 0x08, /* interrupt status */
121 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
122 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
123 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
124 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
125
126 /* HOST_CTL bits */
127 HOST_RESET = (1 << 0), /* reset controller; self-clear */
128 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
129 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
130
131 /* HOST_CAP bits */
132 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
133 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
134 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
135 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
136 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
137 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
138 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
139 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
140 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
141
142 /* registers for each SATA port */
143 PORT_LST_ADDR = 0x00, /* command list DMA addr */
144 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
145 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
146 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
147 PORT_IRQ_STAT = 0x10, /* interrupt status */
148 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
149 PORT_CMD = 0x18, /* port command */
150 PORT_TFDATA = 0x20, /* taskfile data */
151 PORT_SIG = 0x24, /* device TF signature */
152 PORT_CMD_ISSUE = 0x38, /* command issue */
153 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
154 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
155 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
156 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
157 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
158
159 /* PORT_IRQ_{STAT,MASK} bits */
160 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
161 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
162 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
163 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
164 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
165 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
166 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
167 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
168
169 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
170 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
171 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
172 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
173 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
174 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
175 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
176 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
177 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
178
179 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
180 PORT_IRQ_IF_ERR |
181 PORT_IRQ_CONNECT |
182 PORT_IRQ_PHYRDY |
183 PORT_IRQ_UNK_FIS |
184 PORT_IRQ_BAD_PMP,
185 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
186 PORT_IRQ_TF_ERR |
187 PORT_IRQ_HBUS_DATA_ERR,
188 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
189 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
190 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
191
192 /* PORT_CMD bits */
193 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
194 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
195 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
196 PORT_CMD_PMP = (1 << 17), /* PMP attached */
197 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
198 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
199 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
200 PORT_CMD_CLO = (1 << 3), /* Command list override */
201 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
202 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
203 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
204
205 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
206 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
207 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
208 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
209
210 /* hpriv->flags bits */
211 AHCI_HFLAG_NO_NCQ = (1 << 0),
212 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
213 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
214 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
215 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
216 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
217 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
218 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
219 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
220 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
221 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
222 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
223 link offline */
224
225 /* ap->flags bits */
226
227 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
228 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
229 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
230 ATA_FLAG_IPM,
231
232 ICH_MAP = 0x90, /* ICH MAP register */
233
234 /* em constants */
235 EM_MAX_SLOTS = 8,
236 EM_MAX_RETRY = 5,
237
238 /* em_ctl bits */
239 EM_CTL_RST = (1 << 9), /* Reset */
240 EM_CTL_TM = (1 << 8), /* Transmit Message */
241 EM_CTL_ALHD = (1 << 26), /* Activity LED */
242 };
243
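/*
 * Per-slot command header as defined by the AHCI spec. Each port owns
 * AHCI_MAX_CMDS (32) of these in its command list: 'opts' holds the FIS
 * length, flags and PRDT entry count, 'status' receives the PRD byte
 * count on completion, and 'tbl_addr' points at the slot's command
 * table (filled in by ahci_fill_cmd_slot() below).
 */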
244 struct ahci_cmd_hdr {
245 __le32 opts;
246 __le32 status;
247 __le32 tbl_addr;
248 __le32 tbl_addr_hi;
249 __le32 reserved[4];
250 };
251
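/*
 * One PRDT (scatter/gather) entry inside a command table. 'flags_size'
 * holds the byte count minus one in its low bits; setting AHCI_IRQ_ON_SG
 * (bit 31) asks the HBA to interrupt when the entry completes.
 */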
252 struct ahci_sg {
253 __le32 addr;
254 __le32 addr_hi;
255 __le32 reserved;
256 __le32 flags_size;
257 };
258
259 struct ahci_em_priv {
260 enum sw_activity blink_policy;
261 struct timer_list timer;
262 unsigned long saved_activity;
263 unsigned long activity;
264 unsigned long led_state;
265 };
266
267 struct ahci_host_priv {
268 unsigned int flags; /* AHCI_HFLAG_* */
269 u32 cap; /* cap to use */
270 u32 port_map; /* port map to use */
271 u32 saved_cap; /* saved initial cap */
272 u32 saved_port_map; /* saved initial port_map */
273 u32 em_loc; /* enclosure management location */
274 };
275
276 struct ahci_port_priv {
277 struct ata_link *active_link;
278 struct ahci_cmd_hdr *cmd_slot;
279 dma_addr_t cmd_slot_dma;
280 void *cmd_tbl;
281 dma_addr_t cmd_tbl_dma;
282 void *rx_fis;
283 dma_addr_t rx_fis_dma;
284 /* for NCQ spurious interrupt analysis */
285 unsigned int ncq_saw_d2h:1;
286 unsigned int ncq_saw_dmas:1;
287 unsigned int ncq_saw_sdb:1;
288 u32 intr_mask; /* interrupts to enable */
289 /* enclosure management info per PM slot */
290 struct ahci_em_priv em_priv[EM_MAX_SLOTS];
291 };
292
293 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
294 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
295 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
296 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
297 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
298 static int ahci_port_start(struct ata_port *ap);
299 static void ahci_port_stop(struct ata_port *ap);
300 static void ahci_qc_prep(struct ata_queued_cmd *qc);
301 static void ahci_freeze(struct ata_port *ap);
302 static void ahci_thaw(struct ata_port *ap);
303 static void ahci_pmp_attach(struct ata_port *ap);
304 static void ahci_pmp_detach(struct ata_port *ap);
305 static int ahci_softreset(struct ata_link *link, unsigned int *class,
306 unsigned long deadline);
307 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
308 unsigned long deadline);
309 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
310 unsigned long deadline);
311 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
312 unsigned long deadline);
313 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
314 unsigned long deadline);
315 static void ahci_postreset(struct ata_link *link, unsigned int *class);
316 static void ahci_error_handler(struct ata_port *ap);
317 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
318 static int ahci_port_resume(struct ata_port *ap);
319 static void ahci_dev_config(struct ata_device *dev);
320 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
321 u32 opts);
322 #ifdef CONFIG_PM
323 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
324 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
325 static int ahci_pci_device_resume(struct pci_dev *pdev);
326 #endif
327 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
328 static ssize_t ahci_activity_store(struct ata_device *dev,
329 enum sw_activity val);
330 static void ahci_init_sw_activity(struct ata_link *link);
331
332 static ssize_t ahci_show_host_caps(struct device *dev,
333 struct device_attribute *attr, char *buf);
334 static ssize_t ahci_show_host_version(struct device *dev,
335 struct device_attribute *attr, char *buf);
336 static ssize_t ahci_show_port_cmd(struct device *dev,
337 struct device_attribute *attr, char *buf);
338
339 DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
340 DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
341 DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
342
343 static struct device_attribute *ahci_shost_attrs[] = {
344 &dev_attr_link_power_management_policy,
345 &dev_attr_em_message_type,
346 &dev_attr_em_message,
347 &dev_attr_ahci_host_caps,
348 &dev_attr_ahci_host_version,
349 &dev_attr_ahci_port_cmd,
350 NULL
351 };
352
353 static struct device_attribute *ahci_sdev_attrs[] = {
354 &dev_attr_sw_activity,
355 &dev_attr_unload_heads,
356 NULL
357 };
358
359 static struct scsi_host_template ahci_sht = {
360 ATA_NCQ_SHT(DRV_NAME),
361 .can_queue = AHCI_MAX_CMDS - 1,
362 .sg_tablesize = AHCI_MAX_SG,
363 .dma_boundary = AHCI_DMA_BOUNDARY,
364 .shost_attrs = ahci_shost_attrs,
365 .sdev_attrs = ahci_sdev_attrs,
366 };
367
368 static struct ata_port_operations ahci_ops = {
369 .inherits = &sata_pmp_port_ops,
370
371 .qc_defer = sata_pmp_qc_defer_cmd_switch,
372 .qc_prep = ahci_qc_prep,
373 .qc_issue = ahci_qc_issue,
374 .qc_fill_rtf = ahci_qc_fill_rtf,
375
376 .freeze = ahci_freeze,
377 .thaw = ahci_thaw,
378 .softreset = ahci_softreset,
379 .hardreset = ahci_hardreset,
380 .postreset = ahci_postreset,
381 .pmp_softreset = ahci_softreset,
382 .error_handler = ahci_error_handler,
383 .post_internal_cmd = ahci_post_internal_cmd,
384 .dev_config = ahci_dev_config,
385
386 .scr_read = ahci_scr_read,
387 .scr_write = ahci_scr_write,
388 .pmp_attach = ahci_pmp_attach,
389 .pmp_detach = ahci_pmp_detach,
390
391 .enable_pm = ahci_enable_alpm,
392 .disable_pm = ahci_disable_alpm,
393 .em_show = ahci_led_show,
394 .em_store = ahci_led_store,
395 .sw_activity_show = ahci_activity_show,
396 .sw_activity_store = ahci_activity_store,
397 #ifdef CONFIG_PM
398 .port_suspend = ahci_port_suspend,
399 .port_resume = ahci_port_resume,
400 #endif
401 .port_start = ahci_port_start,
402 .port_stop = ahci_port_stop,
403 };
404
405 static struct ata_port_operations ahci_vt8251_ops = {
406 .inherits = &ahci_ops,
407 .hardreset = ahci_vt8251_hardreset,
408 };
409
410 static struct ata_port_operations ahci_p5wdh_ops = {
411 .inherits = &ahci_ops,
412 .hardreset = ahci_p5wdh_hardreset,
413 };
414
415 static struct ata_port_operations ahci_sb600_ops = {
416 .inherits = &ahci_ops,
417 .softreset = ahci_sb600_softreset,
418 .pmp_softreset = ahci_sb600_softreset,
419 };
420
421 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
422
423 static const struct ata_port_info ahci_port_info[] = {
424 [board_ahci] =
425 {
426 .flags = AHCI_FLAG_COMMON,
427 .pio_mask = ATA_PIO4,
428 .udma_mask = ATA_UDMA6,
429 .port_ops = &ahci_ops,
430 },
431 [board_ahci_vt8251] =
432 {
433 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
434 .flags = AHCI_FLAG_COMMON,
435 .pio_mask = ATA_PIO4,
436 .udma_mask = ATA_UDMA6,
437 .port_ops = &ahci_vt8251_ops,
438 },
439 [board_ahci_ign_iferr] =
440 {
441 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
442 .flags = AHCI_FLAG_COMMON,
443 .pio_mask = ATA_PIO4,
444 .udma_mask = ATA_UDMA6,
445 .port_ops = &ahci_ops,
446 },
447 [board_ahci_sb600] =
448 {
449 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
450 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
451 AHCI_HFLAG_32BIT_ONLY),
452 .flags = AHCI_FLAG_COMMON,
453 .pio_mask = ATA_PIO4,
454 .udma_mask = ATA_UDMA6,
455 .port_ops = &ahci_sb600_ops,
456 },
457 [board_ahci_mv] =
458 {
459 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
460 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
461 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
462 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
463 .pio_mask = ATA_PIO4,
464 .udma_mask = ATA_UDMA6,
465 .port_ops = &ahci_ops,
466 },
467 [board_ahci_sb700] = /* for SB700 and SB800 */
468 {
469 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
470 .flags = AHCI_FLAG_COMMON,
471 .pio_mask = ATA_PIO4,
472 .udma_mask = ATA_UDMA6,
473 .port_ops = &ahci_sb600_ops,
474 },
475 [board_ahci_mcp65] =
476 {
477 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
478 .flags = AHCI_FLAG_COMMON,
479 .pio_mask = ATA_PIO4,
480 .udma_mask = ATA_UDMA6,
481 .port_ops = &ahci_ops,
482 },
483 [board_ahci_nopmp] =
484 {
485 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
486 .flags = AHCI_FLAG_COMMON,
487 .pio_mask = ATA_PIO4,
488 .udma_mask = ATA_UDMA6,
489 .port_ops = &ahci_ops,
490 },
491 /* board_ahci_yesncq */
492 {
493 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
494 .flags = AHCI_FLAG_COMMON,
495 .pio_mask = ATA_PIO4,
496 .udma_mask = ATA_UDMA6,
497 .port_ops = &ahci_ops,
498 },
499 };
500
501 static const struct pci_device_id ahci_pci_tbl[] = {
502 /* Intel */
503 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
504 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
505 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
506 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
507 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
508 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
509 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
510 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
511 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
512 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
513 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
514 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
515 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
516 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
517 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
518 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
519 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
520 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
521 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
522 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
523 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
524 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
525 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
526 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
527 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
528 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
529 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
530 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
531 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
532 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
533 { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
534 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
535 { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
536 { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
537 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
538 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
539 { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
540 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
541 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
542 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
543
544 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
545 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
546 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
547
548 /* ATI */
549 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
550 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
551 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
552 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
553 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
554 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
555 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
556
557 /* AMD */
558 { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD SB900 */
559 /* AMD uses the RAID class only for AHCI controllers */
560 { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
561 PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
562
563 /* VIA */
564 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
565 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
566
567 /* NVIDIA */
568 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
569 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
570 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
571 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
572 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
573 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
574 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
575 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
576 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
577 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
578 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
579 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
580 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
581 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
582 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
583 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
584 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
585 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
586 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
587 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
588 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
589 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
590 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
591 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
592 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
593 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
594 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
595 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
596 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
597 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
598 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
599 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
600 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
601 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
602 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
603 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
604 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
605 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
606 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
607 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
608 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
609 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
610 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
611 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
612 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
613 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
614 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
615 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
616 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
617 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
618 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
619 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
620 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
621 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
622 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
623 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
624 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
625 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
626 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
627 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
628 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
629 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
630 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
631 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
632 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
633 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
634 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
635 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
636
637 /* SiS */
638 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
639 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
640 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
641
642 /* Marvell */
643 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
644 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
645
646 /* Promise */
647 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
648
649 /* Generic, PCI class code for AHCI */
650 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
651 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
652
653 { } /* terminate list */
654 };
655
656
657 static struct pci_driver ahci_pci_driver = {
658 .name = DRV_NAME,
659 .id_table = ahci_pci_tbl,
660 .probe = ahci_init_one,
661 .remove = ata_pci_remove_one,
662 #ifdef CONFIG_PM
663 .suspend = ahci_pci_device_suspend,
664 .resume = ahci_pci_device_resume,
665 #endif
666 };
667
668 static int ahci_em_messages = 1;
669 module_param(ahci_em_messages, int, 0444);
670 /* add other LED protocol types when they become supported */
671 MODULE_PARM_DESC(ahci_em_messages,
672 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
673
674 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
675 static int marvell_enable;
676 #else
677 static int marvell_enable = 1;
678 #endif
679 module_param(marvell_enable, int, 0644);
680 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
681
682
683 static inline int ahci_nr_ports(u32 cap)
684 {
685 return (cap & 0x1f) + 1;
686 }
687
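/*
 * Port registers live in the AHCI BAR after the 0x100-byte global
 * register block, one 0x80-byte window per port.
 */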
688 static inline void __iomem *__ahci_port_base(struct ata_host *host,
689 unsigned int port_no)
690 {
691 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
692
693 return mmio + 0x100 + (port_no * 0x80);
694 }
695
696 static inline void __iomem *ahci_port_base(struct ata_port *ap)
697 {
698 return __ahci_port_base(ap->host, ap->port_no);
699 }
700
701 static void ahci_enable_ahci(void __iomem *mmio)
702 {
703 int i;
704 u32 tmp;
705
706 /* turn on AHCI_EN */
707 tmp = readl(mmio + HOST_CTL);
708 if (tmp & HOST_AHCI_EN)
709 return;
710
711 /* Some controllers need AHCI_EN to be written multiple times.
712 * Try a few times before giving up.
713 */
714 for (i = 0; i < 5; i++) {
715 tmp |= HOST_AHCI_EN;
716 writel(tmp, mmio + HOST_CTL);
717 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
718 if (tmp & HOST_AHCI_EN)
719 return;
720 msleep(10);
721 }
722
723 WARN_ON(1);
724 }
725
726 static ssize_t ahci_show_host_caps(struct device *dev,
727 struct device_attribute *attr, char *buf)
728 {
729 struct Scsi_Host *shost = class_to_shost(dev);
730 struct ata_port *ap = ata_shost_to_port(shost);
731 struct ahci_host_priv *hpriv = ap->host->private_data;
732
733 return sprintf(buf, "%x\n", hpriv->cap);
734 }
735
736 static ssize_t ahci_show_host_version(struct device *dev,
737 struct device_attribute *attr, char *buf)
738 {
739 struct Scsi_Host *shost = class_to_shost(dev);
740 struct ata_port *ap = ata_shost_to_port(shost);
741 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
742
743 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
744 }
745
746 static ssize_t ahci_show_port_cmd(struct device *dev,
747 struct device_attribute *attr, char *buf)
748 {
749 struct Scsi_Host *shost = class_to_shost(dev);
750 struct ata_port *ap = ata_shost_to_port(shost);
751 void __iomem *port_mmio = ahci_port_base(ap);
752
753 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
754 }
755
756 /**
757 * ahci_save_initial_config - Save and fixup initial config values
758 * @pdev: target PCI device
759 * @hpriv: host private area to store config values
760 *
761 * Some registers containing configuration info might be setup by
762 * BIOS and might be cleared on reset. This function saves the
763 * initial values of those registers into @hpriv such that they
764 * can be restored after controller reset.
765 *
766 * If inconsistent, config values are fixed up by this function.
767 *
768 * LOCKING:
769 * None.
770 */
771 static void ahci_save_initial_config(struct pci_dev *pdev,
772 struct ahci_host_priv *hpriv)
773 {
774 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
775 u32 cap, port_map;
776 int i;
777 int mv;
778
779 /* make sure AHCI mode is enabled before accessing CAP */
780 ahci_enable_ahci(mmio);
781
782 /* Values prefixed with saved_ are written back to host after
783 * reset. Values without are used for driver operation.
784 */
785 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
786 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
787
788 /* some chips have errata preventing 64bit use */
789 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
790 dev_printk(KERN_INFO, &pdev->dev,
791 "controller can't do 64bit DMA, forcing 32bit\n");
792 cap &= ~HOST_CAP_64;
793 }
794
795 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
796 dev_printk(KERN_INFO, &pdev->dev,
797 "controller can't do NCQ, turning off CAP_NCQ\n");
798 cap &= ~HOST_CAP_NCQ;
799 }
800
801 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
802 dev_printk(KERN_INFO, &pdev->dev,
803 "controller can do NCQ, turning on CAP_NCQ\n");
804 cap |= HOST_CAP_NCQ;
805 }
806
807 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
808 dev_printk(KERN_INFO, &pdev->dev,
809 "controller can't do PMP, turning off CAP_PMP\n");
810 cap &= ~HOST_CAP_PMP;
811 }
812
813 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
814 port_map != 1) {
815 dev_printk(KERN_INFO, &pdev->dev,
816 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
817 port_map, 1);
818 port_map = 1;
819 }
820
821 /*
822 * Temporary Marvell 6145 hack: PATA port presence
823 * is asserted through the standard AHCI port
824 * presence register, as bit 4 (counting from 0)
825 */
826 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
827 if (pdev->device == 0x6121)
828 mv = 0x3;
829 else
830 mv = 0xf;
831 dev_printk(KERN_ERR, &pdev->dev,
832 "MV_AHCI HACK: port_map %x -> %x\n",
833 port_map,
834 port_map & mv);
835 dev_printk(KERN_ERR, &pdev->dev,
836 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
837
838 port_map &= mv;
839 }
840
841 /* cross check port_map and cap.n_ports */
842 if (port_map) {
843 int map_ports = 0;
844
845 for (i = 0; i < AHCI_MAX_PORTS; i++)
846 if (port_map & (1 << i))
847 map_ports++;
848
849 /* If PI has more ports than n_ports, whine, clear
850 * port_map and let it be generated from n_ports.
851 */
852 if (map_ports > ahci_nr_ports(cap)) {
853 dev_printk(KERN_WARNING, &pdev->dev,
854 "implemented port map (0x%x) contains more "
855 "ports than nr_ports (%u), using nr_ports\n",
856 port_map, ahci_nr_ports(cap));
857 port_map = 0;
858 }
859 }
860
861 /* fabricate port_map from cap.nr_ports */
862 if (!port_map) {
863 port_map = (1 << ahci_nr_ports(cap)) - 1;
864 dev_printk(KERN_WARNING, &pdev->dev,
865 "forcing PORTS_IMPL to 0x%x\n", port_map);
866
867 /* write the fixed up value to the PI register */
868 hpriv->saved_port_map = port_map;
869 }
870
871 /* record values to use during operation */
872 hpriv->cap = cap;
873 hpriv->port_map = port_map;
874 }
875
876 /**
877 * ahci_restore_initial_config - Restore initial config
878 * @host: target ATA host
879 *
880 * Restore initial config stored by ahci_save_initial_config().
881 *
882 * LOCKING:
883 * None.
884 */
885 static void ahci_restore_initial_config(struct ata_host *host)
886 {
887 struct ahci_host_priv *hpriv = host->private_data;
888 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
889
890 writel(hpriv->saved_cap, mmio + HOST_CAP);
891 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
892 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
893 }
894
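/*
 * Map a libata SCR index to the corresponding PxSCR register offset.
 * SNotification is only available when the controller advertises
 * CAP.SNTF; unsupported registers return 0 so the callers below fail
 * with -EINVAL.
 */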
895 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
896 {
897 static const int offset[] = {
898 [SCR_STATUS] = PORT_SCR_STAT,
899 [SCR_CONTROL] = PORT_SCR_CTL,
900 [SCR_ERROR] = PORT_SCR_ERR,
901 [SCR_ACTIVE] = PORT_SCR_ACT,
902 [SCR_NOTIFICATION] = PORT_SCR_NTF,
903 };
904 struct ahci_host_priv *hpriv = ap->host->private_data;
905
906 if (sc_reg < ARRAY_SIZE(offset) &&
907 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
908 return offset[sc_reg];
909 return 0;
910 }
911
912 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
913 {
914 void __iomem *port_mmio = ahci_port_base(link->ap);
915 int offset = ahci_scr_offset(link->ap, sc_reg);
916
917 if (offset) {
918 *val = readl(port_mmio + offset);
919 return 0;
920 }
921 return -EINVAL;
922 }
923
924 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
925 {
926 void __iomem *port_mmio = ahci_port_base(link->ap);
927 int offset = ahci_scr_offset(link->ap, sc_reg);
928
929 if (offset) {
930 writel(val, port_mmio + offset);
931 return 0;
932 }
933 return -EINVAL;
934 }
935
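/*
 * PxCMD.ST starts and stops the port's command list DMA engine; the HBA
 * mirrors the engine state in PxCMD.CR (PORT_CMD_LIST_ON), which the
 * stop path polls for up to the 500 ms allowed by the spec.
 */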
936 static void ahci_start_engine(struct ata_port *ap)
937 {
938 void __iomem *port_mmio = ahci_port_base(ap);
939 u32 tmp;
940
941 /* start DMA */
942 tmp = readl(port_mmio + PORT_CMD);
943 tmp |= PORT_CMD_START;
944 writel(tmp, port_mmio + PORT_CMD);
945 readl(port_mmio + PORT_CMD); /* flush */
946 }
947
948 static int ahci_stop_engine(struct ata_port *ap)
949 {
950 void __iomem *port_mmio = ahci_port_base(ap);
951 u32 tmp;
952
953 tmp = readl(port_mmio + PORT_CMD);
954
955 /* check if the HBA is idle */
956 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
957 return 0;
958
959 /* setting HBA to idle */
960 tmp &= ~PORT_CMD_START;
961 writel(tmp, port_mmio + PORT_CMD);
962
963 /* wait for engine to stop. This could be as long as 500 msec */
964 tmp = ata_wait_register(port_mmio + PORT_CMD,
965 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
966 if (tmp & PORT_CMD_LIST_ON)
967 return -EIO;
968
969 return 0;
970 }
971
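/*
 * Program the command list and received-FIS DMA addresses (including
 * the high dwords on 64-bit capable HBAs) and then set PxCMD.FRE so the
 * port starts receiving FISes.
 */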
972 static void ahci_start_fis_rx(struct ata_port *ap)
973 {
974 void __iomem *port_mmio = ahci_port_base(ap);
975 struct ahci_host_priv *hpriv = ap->host->private_data;
976 struct ahci_port_priv *pp = ap->private_data;
977 u32 tmp;
978
979 /* set FIS registers */
980 if (hpriv->cap & HOST_CAP_64)
981 writel((pp->cmd_slot_dma >> 16) >> 16,
982 port_mmio + PORT_LST_ADDR_HI);
983 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
984
985 if (hpriv->cap & HOST_CAP_64)
986 writel((pp->rx_fis_dma >> 16) >> 16,
987 port_mmio + PORT_FIS_ADDR_HI);
988 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
989
990 /* enable FIS reception */
991 tmp = readl(port_mmio + PORT_CMD);
992 tmp |= PORT_CMD_FIS_RX;
993 writel(tmp, port_mmio + PORT_CMD);
994
995 /* flush */
996 readl(port_mmio + PORT_CMD);
997 }
998
999 static int ahci_stop_fis_rx(struct ata_port *ap)
1000 {
1001 void __iomem *port_mmio = ahci_port_base(ap);
1002 u32 tmp;
1003
1004 /* disable FIS reception */
1005 tmp = readl(port_mmio + PORT_CMD);
1006 tmp &= ~PORT_CMD_FIS_RX;
1007 writel(tmp, port_mmio + PORT_CMD);
1008
1009 /* wait for completion, spec says 500ms, give it 1000 */
1010 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1011 PORT_CMD_FIS_ON, 10, 1000);
1012 if (tmp & PORT_CMD_FIS_ON)
1013 return -EBUSY;
1014
1015 return 0;
1016 }
1017
1018 static void ahci_power_up(struct ata_port *ap)
1019 {
1020 struct ahci_host_priv *hpriv = ap->host->private_data;
1021 void __iomem *port_mmio = ahci_port_base(ap);
1022 u32 cmd;
1023
1024 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1025
1026 /* spin up device */
1027 if (hpriv->cap & HOST_CAP_SSS) {
1028 cmd |= PORT_CMD_SPIN_UP;
1029 writel(cmd, port_mmio + PORT_CMD);
1030 }
1031
1032 /* wake up link */
1033 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
1034 }
1035
1036 static void ahci_disable_alpm(struct ata_port *ap)
1037 {
1038 struct ahci_host_priv *hpriv = ap->host->private_data;
1039 void __iomem *port_mmio = ahci_port_base(ap);
1040 u32 cmd;
1041 struct ahci_port_priv *pp = ap->private_data;
1042
1043 /* IPM bits should be disabled by libata-core */
1044 /* get the existing command bits */
1045 cmd = readl(port_mmio + PORT_CMD);
1046
1047 /* disable ALPM and ASP */
1048 cmd &= ~PORT_CMD_ASP;
1049 cmd &= ~PORT_CMD_ALPE;
1050
1051 /* force the interface back to active */
1052 cmd |= PORT_CMD_ICC_ACTIVE;
1053
1054 /* write out new cmd value */
1055 writel(cmd, port_mmio + PORT_CMD);
1056 cmd = readl(port_mmio + PORT_CMD);
1057
1058 /* wait 10ms to be sure we've come out of any low power state */
1059 msleep(10);
1060
1061 /* clear out any PhyRdy stuff from interrupt status */
1062 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1063
1064 /* go ahead and clean out PhyRdy Change from SError too */
1065 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1066
1067 /*
1068 * Clear the flag that told us to ignore all PhyRdy state
1069 * changes; hotplug events are honored again
1070 */
1071 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1072
1073 /*
1074 * Enable interrupts on Phy Ready.
1075 */
1076 pp->intr_mask |= PORT_IRQ_PHYRDY;
1077 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1078
1079 /*
1080 * don't change the link pm policy - we can be called
1081 * just to turn off link pm temporarily
1082 */
1083 }
1084
1085 static int ahci_enable_alpm(struct ata_port *ap,
1086 enum link_pm policy)
1087 {
1088 struct ahci_host_priv *hpriv = ap->host->private_data;
1089 void __iomem *port_mmio = ahci_port_base(ap);
1090 u32 cmd;
1091 struct ahci_port_priv *pp = ap->private_data;
1092 u32 asp;
1093
1094 /* Make sure the host is capable of link power management */
1095 if (!(hpriv->cap & HOST_CAP_ALPM))
1096 return -EINVAL;
1097
1098 switch (policy) {
1099 case MAX_PERFORMANCE:
1100 case NOT_AVAILABLE:
1101 /*
1102 * if we came here with NOT_AVAILABLE,
1103 * it just means this is the first time we
1104 * have tried to enable - default to max performance,
1105 * and let the user go to lower power modes on request.
1106 */
1107 ahci_disable_alpm(ap);
1108 return 0;
1109 case MIN_POWER:
1110 /* configure HBA to enter SLUMBER */
1111 asp = PORT_CMD_ASP;
1112 break;
1113 case MEDIUM_POWER:
1114 /* configure HBA to enter PARTIAL */
1115 asp = 0;
1116 break;
1117 default:
1118 return -EINVAL;
1119 }
1120
1121 /*
1122 * Disable interrupts on Phy Ready. This keeps us from
1123 * getting woken up due to spurious phy ready interrupts.
1124 * TBD - Hot plug should be done via polling now, is
1125 * that even supported?
1126 */
1127 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1128 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1129
1130 /*
1131 * Set a flag to indicate that we should ignore all PhyRdy
1132 * state changes since these can happen now whenever we
1133 * change link state
1134 */
1135 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1136
1137 /* get the existing command bits */
1138 cmd = readl(port_mmio + PORT_CMD);
1139
1140 /*
1141 * Set ASP based on Policy
1142 */
1143 cmd |= asp;
1144
1145 /*
1146 * Setting this bit will instruct the HBA to aggressively
1147 * enter a lower power link state when it's appropriate and
1148 * based on the value set above for ASP
1149 */
1150 cmd |= PORT_CMD_ALPE;
1151
1152 /* write out new cmd value */
1153 writel(cmd, port_mmio + PORT_CMD);
1154 cmd = readl(port_mmio + PORT_CMD);
1155
1156 /* IPM bits should be set by libata-core */
1157 return 0;
1158 }
1159
1160 #ifdef CONFIG_PM
1161 static void ahci_power_down(struct ata_port *ap)
1162 {
1163 struct ahci_host_priv *hpriv = ap->host->private_data;
1164 void __iomem *port_mmio = ahci_port_base(ap);
1165 u32 cmd, scontrol;
1166
1167 if (!(hpriv->cap & HOST_CAP_SSS))
1168 return;
1169
1170 /* put device into listen mode, first set PxSCTL.DET to 0 */
1171 scontrol = readl(port_mmio + PORT_SCR_CTL);
1172 scontrol &= ~0xf;
1173 writel(scontrol, port_mmio + PORT_SCR_CTL);
1174
1175 /* then set PxCMD.SUD to 0 */
1176 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1177 cmd &= ~PORT_CMD_SPIN_UP;
1178 writel(cmd, port_mmio + PORT_CMD);
1179 }
1180 #endif
1181
1182 static void ahci_start_port(struct ata_port *ap)
1183 {
1184 struct ahci_port_priv *pp = ap->private_data;
1185 struct ata_link *link;
1186 struct ahci_em_priv *emp;
1187 ssize_t rc;
1188 int i;
1189
1190 /* enable FIS reception */
1191 ahci_start_fis_rx(ap);
1192
1193 /* enable DMA */
1194 ahci_start_engine(ap);
1195
1196 /* turn on LEDs */
1197 if (ap->flags & ATA_FLAG_EM) {
1198 ata_for_each_link(link, ap, EDGE) {
1199 emp = &pp->em_priv[link->pmp];
1200
1201 /* EM Transmit bit may be busy during init */
1202 for (i = 0; i < EM_MAX_RETRY; i++) {
1203 rc = ahci_transmit_led_message(ap,
1204 emp->led_state,
1205 4);
1206 if (rc == -EBUSY)
1207 msleep(1);
1208 else
1209 break;
1210 }
1211 }
1212 }
1213
1214 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1215 ata_for_each_link(link, ap, EDGE)
1216 ahci_init_sw_activity(link);
1217
1218 }
1219
1220 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1221 {
1222 int rc;
1223
1224 /* disable DMA */
1225 rc = ahci_stop_engine(ap);
1226 if (rc) {
1227 *emsg = "failed to stop engine";
1228 return rc;
1229 }
1230
1231 /* disable FIS reception */
1232 rc = ahci_stop_fis_rx(ap);
1233 if (rc) {
1234 *emsg = "failed stop FIS RX";
1235 return rc;
1236 }
1237
1238 return 0;
1239 }
1240
1241 static int ahci_reset_controller(struct ata_host *host)
1242 {
1243 struct pci_dev *pdev = to_pci_dev(host->dev);
1244 struct ahci_host_priv *hpriv = host->private_data;
1245 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1246 u32 tmp;
1247
1248 /* we must be in AHCI mode, before using anything
1249 * AHCI-specific, such as HOST_RESET.
1250 */
1251 ahci_enable_ahci(mmio);
1252
1253 /* global controller reset */
1254 if (!ahci_skip_host_reset) {
1255 tmp = readl(mmio + HOST_CTL);
1256 if ((tmp & HOST_RESET) == 0) {
1257 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1258 readl(mmio + HOST_CTL); /* flush */
1259 }
1260
1261 /*
1262 * to perform host reset, OS should set HOST_RESET
1263 * and poll until this bit is read to be "0".
1264 * reset must complete within 1 second, or
1265 * the hardware should be considered fried.
1266 */
1267 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1268 HOST_RESET, 10, 1000);
1269
1270 if (tmp & HOST_RESET) {
1271 dev_printk(KERN_ERR, host->dev,
1272 "controller reset failed (0x%x)\n", tmp);
1273 return -EIO;
1274 }
1275
1276 /* turn on AHCI mode */
1277 ahci_enable_ahci(mmio);
1278
1279 /* Some registers might be cleared on reset. Restore
1280 * initial values.
1281 */
1282 ahci_restore_initial_config(host);
1283 } else
1284 dev_printk(KERN_INFO, host->dev,
1285 "skipping global host reset\n");
1286
1287 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1288 u16 tmp16;
1289
1290 /* configure PCS */
1291 pci_read_config_word(pdev, 0x92, &tmp16);
1292 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1293 tmp16 |= hpriv->port_map;
1294 pci_write_config_word(pdev, 0x92, tmp16);
1295 }
1296 }
1297
1298 return 0;
1299 }
1300
1301 static void ahci_sw_activity(struct ata_link *link)
1302 {
1303 struct ata_port *ap = link->ap;
1304 struct ahci_port_priv *pp = ap->private_data;
1305 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1306
1307 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1308 return;
1309
1310 emp->activity++;
1311 if (!timer_pending(&emp->timer))
1312 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1313 }
1314
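/*
 * Timer callback for software activity blinking: while new activity
 * keeps arriving, the LED value bit is toggled and the timer re-armed
 * every 100 ms; once activity stops, the LED is returned to the idle
 * state dictated by the blink policy.
 */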
1315 static void ahci_sw_activity_blink(unsigned long arg)
1316 {
1317 struct ata_link *link = (struct ata_link *)arg;
1318 struct ata_port *ap = link->ap;
1319 struct ahci_port_priv *pp = ap->private_data;
1320 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1321 unsigned long led_message = emp->led_state;
1322 u32 activity_led_state;
1323 unsigned long flags;
1324
1325 led_message &= EM_MSG_LED_VALUE;
1326 led_message |= ap->port_no | (link->pmp << 8);
1327
1328 /* check to see if we've had activity. If so,
1329 * toggle state of LED and reset timer. If not,
1330 * turn LED to desired idle state.
1331 */
1332 spin_lock_irqsave(ap->lock, flags);
1333 if (emp->saved_activity != emp->activity) {
1334 emp->saved_activity = emp->activity;
1335 /* get the current LED state */
1336 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1337
1338 if (activity_led_state)
1339 activity_led_state = 0;
1340 else
1341 activity_led_state = 1;
1342
1343 /* clear old state */
1344 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1345
1346 /* toggle state */
1347 led_message |= (activity_led_state << 16);
1348 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1349 } else {
1350 /* switch to idle */
1351 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1352 if (emp->blink_policy == BLINK_OFF)
1353 led_message |= (1 << 16);
1354 }
1355 spin_unlock_irqrestore(ap->lock, flags);
1356 ahci_transmit_led_message(ap, led_message, 4);
1357 }
1358
1359 static void ahci_init_sw_activity(struct ata_link *link)
1360 {
1361 struct ata_port *ap = link->ap;
1362 struct ahci_port_priv *pp = ap->private_data;
1363 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1364
1365 /* init activity stats, setup timer */
1366 emp->saved_activity = emp->activity = 0;
1367 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1368
1369 /* check our blink policy and set flag for link if it's enabled */
1370 if (emp->blink_policy)
1371 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1372 }
1373
1374 static int ahci_reset_em(struct ata_host *host)
1375 {
1376 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1377 u32 em_ctl;
1378
1379 em_ctl = readl(mmio + HOST_EM_CTL);
1380 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1381 return -EINVAL;
1382
1383 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1384 return 0;
1385 }
1386
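/*
 * Build and send a two-dword LED message: the first dword is a header
 * carrying the message size (4 bytes), the second is the LED state
 * word. Both are written to the transmit buffer at HOST_EM_LOC and sent
 * by setting EM_CTL_TM; -EBUSY is returned if a previous transmission
 * is still in flight.
 */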
1387 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1388 ssize_t size)
1389 {
1390 struct ahci_host_priv *hpriv = ap->host->private_data;
1391 struct ahci_port_priv *pp = ap->private_data;
1392 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1393 u32 em_ctl;
1394 u32 message[] = {0, 0};
1395 unsigned long flags;
1396 int pmp;
1397 struct ahci_em_priv *emp;
1398
1399 /* get the slot number from the message */
1400 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1401 if (pmp < EM_MAX_SLOTS)
1402 emp = &pp->em_priv[pmp];
1403 else
1404 return -EINVAL;
1405
1406 spin_lock_irqsave(ap->lock, flags);
1407
1408 /*
1409 * if we are still busy transmitting a previous message,
1410 * do not allow a new one to be sent
1411 */
1412 em_ctl = readl(mmio + HOST_EM_CTL);
1413 if (em_ctl & EM_CTL_TM) {
1414 spin_unlock_irqrestore(ap->lock, flags);
1415 return -EBUSY;
1416 }
1417
1418 /*
1419 * create message header - this is all zero except for
1420 * the message size, which is 4 bytes.
1421 */
1422 message[0] |= (4 << 8);
1423
1424 /* ignore 0:4 of byte zero, fill in port info yourself */
1425 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1426
1427 /* write message to EM_LOC */
1428 writel(message[0], mmio + hpriv->em_loc);
1429 writel(message[1], mmio + hpriv->em_loc+4);
1430
1431 /* save off new led state for port/slot */
1432 emp->led_state = state;
1433
1434 /*
1435 * tell hardware to transmit the message
1436 */
1437 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1438
1439 spin_unlock_irqrestore(ap->lock, flags);
1440 return size;
1441 }
1442
1443 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1444 {
1445 struct ahci_port_priv *pp = ap->private_data;
1446 struct ata_link *link;
1447 struct ahci_em_priv *emp;
1448 int rc = 0;
1449
1450 ata_for_each_link(link, ap, EDGE) {
1451 emp = &pp->em_priv[link->pmp];
1452 rc += sprintf(buf, "%lx\n", emp->led_state);
1453 }
1454 return rc;
1455 }
1456
1457 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1458 size_t size)
1459 {
1460 int state;
1461 int pmp;
1462 struct ahci_port_priv *pp = ap->private_data;
1463 struct ahci_em_priv *emp;
1464
1465 state = simple_strtoul(buf, NULL, 0);
1466
1467 /* get the slot number from the message */
1468 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1469 if (pmp < EM_MAX_SLOTS)
1470 emp = &pp->em_priv[pmp];
1471 else
1472 return -EINVAL;
1473
1474 /* mask off the activity bits if we are in sw_activity
1475 * mode, user should turn off sw_activity before setting
1476 * activity led through em_message
1477 */
1478 if (emp->blink_policy)
1479 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1480
1481 return ahci_transmit_led_message(ap, state, size);
1482 }
1483
1484 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1485 {
1486 struct ata_link *link = dev->link;
1487 struct ata_port *ap = link->ap;
1488 struct ahci_port_priv *pp = ap->private_data;
1489 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1490 u32 port_led_state = emp->led_state;
1491
1492 /* save the desired Activity LED behavior */
1493 if (val == OFF) {
1494 /* clear LFLAG */
1495 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1496
1497 /* set the LED to OFF */
1498 port_led_state &= EM_MSG_LED_VALUE_OFF;
1499 port_led_state |= (ap->port_no | (link->pmp << 8));
1500 ahci_transmit_led_message(ap, port_led_state, 4);
1501 } else {
1502 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1503 if (val == BLINK_OFF) {
1504 /* set LED to ON for idle */
1505 port_led_state &= EM_MSG_LED_VALUE_OFF;
1506 port_led_state |= (ap->port_no | (link->pmp << 8));
1507 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1508 ahci_transmit_led_message(ap, port_led_state, 4);
1509 }
1510 }
1511 emp->blink_policy = val;
1512 return 0;
1513 }
1514
1515 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1516 {
1517 struct ata_link *link = dev->link;
1518 struct ata_port *ap = link->ap;
1519 struct ahci_port_priv *pp = ap->private_data;
1520 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1521
1522 /* display the saved value of activity behavior for this
1523 * disk.
1524 */
1525 return sprintf(buf, "%d\n", emp->blink_policy);
1526 }
1527
1528 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1529 int port_no, void __iomem *mmio,
1530 void __iomem *port_mmio)
1531 {
1532 const char *emsg = NULL;
1533 int rc;
1534 u32 tmp;
1535
1536 /* make sure port is not active */
1537 rc = ahci_deinit_port(ap, &emsg);
1538 if (rc)
1539 dev_printk(KERN_WARNING, &pdev->dev,
1540 "%s (%d)\n", emsg, rc);
1541
1542 /* clear SError */
1543 tmp = readl(port_mmio + PORT_SCR_ERR);
1544 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1545 writel(tmp, port_mmio + PORT_SCR_ERR);
1546
1547 /* clear port IRQ */
1548 tmp = readl(port_mmio + PORT_IRQ_STAT);
1549 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1550 if (tmp)
1551 writel(tmp, port_mmio + PORT_IRQ_STAT);
1552
1553 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1554 }
1555
1556 static void ahci_init_controller(struct ata_host *host)
1557 {
1558 struct ahci_host_priv *hpriv = host->private_data;
1559 struct pci_dev *pdev = to_pci_dev(host->dev);
1560 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1561 int i;
1562 void __iomem *port_mmio;
1563 u32 tmp;
1564 int mv;
1565
1566 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1567 if (pdev->device == 0x6121)
1568 mv = 2;
1569 else
1570 mv = 4;
1571 port_mmio = __ahci_port_base(host, mv);
1572
1573 writel(0, port_mmio + PORT_IRQ_MASK);
1574
1575 /* clear port IRQ */
1576 tmp = readl(port_mmio + PORT_IRQ_STAT);
1577 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1578 if (tmp)
1579 writel(tmp, port_mmio + PORT_IRQ_STAT);
1580 }
1581
1582 for (i = 0; i < host->n_ports; i++) {
1583 struct ata_port *ap = host->ports[i];
1584
1585 port_mmio = ahci_port_base(ap);
1586 if (ata_port_is_dummy(ap))
1587 continue;
1588
1589 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1590 }
1591
1592 tmp = readl(mmio + HOST_CTL);
1593 VPRINTK("HOST_CTL 0x%x\n", tmp);
1594 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1595 tmp = readl(mmio + HOST_CTL);
1596 VPRINTK("HOST_CTL 0x%x\n", tmp);
1597 }
1598
1599 static void ahci_dev_config(struct ata_device *dev)
1600 {
1601 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1602
1603 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1604 dev->max_sectors = 255;
1605 ata_dev_printk(dev, KERN_INFO,
1606 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1607 }
1608 }
1609
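/*
 * PxSIG latches the signature from the device's initial D2H Register
 * FIS; unpack it into a taskfile so ata_dev_classify() can tell ATA,
 * ATAPI and port multipliers apart.
 */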
1610 static unsigned int ahci_dev_classify(struct ata_port *ap)
1611 {
1612 void __iomem *port_mmio = ahci_port_base(ap);
1613 struct ata_taskfile tf;
1614 u32 tmp;
1615
1616 tmp = readl(port_mmio + PORT_SIG);
1617 tf.lbah = (tmp >> 24) & 0xff;
1618 tf.lbam = (tmp >> 16) & 0xff;
1619 tf.lbal = (tmp >> 8) & 0xff;
1620 tf.nsect = (tmp) & 0xff;
1621
1622 return ata_dev_classify(&tf);
1623 }
1624
1625 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1626 u32 opts)
1627 {
1628 dma_addr_t cmd_tbl_dma;
1629
1630 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1631
1632 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1633 pp->cmd_slot[tag].status = 0;
1634 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1635 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1636 }
1637
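/*
 * Stop the DMA engine and, when the device is still busy or a PMP is
 * attached, issue a Command List Override (PxCMD.CLO) to clear BSY/DRQ
 * before the engine is restarted.
 */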
1638 static int ahci_kick_engine(struct ata_port *ap)
1639 {
1640 void __iomem *port_mmio = ahci_port_base(ap);
1641 struct ahci_host_priv *hpriv = ap->host->private_data;
1642 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1643 u32 tmp;
1644 int busy, rc;
1645
1646 /* stop engine */
1647 rc = ahci_stop_engine(ap);
1648 if (rc)
1649 goto out_restart;
1650
1651 /* need to do CLO?
1652 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1653 */
1654 busy = status & (ATA_BUSY | ATA_DRQ);
1655 if (!busy && !sata_pmp_attached(ap)) {
1656 rc = 0;
1657 goto out_restart;
1658 }
1659
1660 if (!(hpriv->cap & HOST_CAP_CLO)) {
1661 rc = -EOPNOTSUPP;
1662 goto out_restart;
1663 }
1664
1665 /* perform CLO */
1666 tmp = readl(port_mmio + PORT_CMD);
1667 tmp |= PORT_CMD_CLO;
1668 writel(tmp, port_mmio + PORT_CMD);
1669
1670 rc = 0;
1671 tmp = ata_wait_register(port_mmio + PORT_CMD,
1672 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1673 if (tmp & PORT_CMD_CLO)
1674 rc = -EIO;
1675
1676 /* restart engine */
1677 out_restart:
1678 ahci_start_engine(ap);
1679 return rc;
1680 }
1681
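/*
 * Issue a single polled command: write the taskfile as a 5-dword H2D
 * Register FIS into slot 0's command table, set bit 0 of PxCI and, if a
 * timeout is given, poll until the HBA clears it, kicking the engine on
 * timeout.
 */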
1682 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1683 struct ata_taskfile *tf, int is_cmd, u16 flags,
1684 unsigned long timeout_msec)
1685 {
1686 const u32 cmd_fis_len = 5; /* five dwords */
1687 struct ahci_port_priv *pp = ap->private_data;
1688 void __iomem *port_mmio = ahci_port_base(ap);
1689 u8 *fis = pp->cmd_tbl;
1690 u32 tmp;
1691
1692 /* prep the command */
1693 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1694 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1695
1696 /* issue & wait */
1697 writel(1, port_mmio + PORT_CMD_ISSUE);
1698
1699 if (timeout_msec) {
1700 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1701 1, timeout_msec);
1702 if (tmp & 0x1) {
1703 ahci_kick_engine(ap);
1704 return -EBUSY;
1705 }
1706 } else
1707 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1708
1709 return 0;
1710 }
1711
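/*
 * AHCI software reset (AHCI-1.1 10.4.1): send one H2D Register FIS with
 * SRST set, wait briefly (the spec minimum is 5 us; the code sleeps
 * 1 ms), send a second FIS clearing SRST, then wait for the link to
 * report ready and classify the device from its signature.
 */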
1712 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1713 int pmp, unsigned long deadline,
1714 int (*check_ready)(struct ata_link *link))
1715 {
1716 struct ata_port *ap = link->ap;
1717 struct ahci_host_priv *hpriv = ap->host->private_data;
1718 const char *reason = NULL;
1719 unsigned long now, msecs;
1720 struct ata_taskfile tf;
1721 int rc;
1722
1723 DPRINTK("ENTER\n");
1724
1725 /* prepare for SRST (AHCI-1.1 10.4.1) */
1726 rc = ahci_kick_engine(ap);
1727 if (rc && rc != -EOPNOTSUPP)
1728 ata_link_printk(link, KERN_WARNING,
1729 "failed to reset engine (errno=%d)\n", rc);
1730
1731 ata_tf_init(link->device, &tf);
1732
1733 /* issue the first H2D Register FIS */
1734 msecs = 0;
1735 now = jiffies;
1736 if (time_after(deadline, now))
1737 msecs = jiffies_to_msecs(deadline - now);
1738
1739 tf.ctl |= ATA_SRST;
1740 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1741 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1742 rc = -EIO;
1743 reason = "1st FIS failed";
1744 goto fail;
1745 }
1746
1747 /* spec says at least 5us, but be generous and sleep for 1ms */
1748 msleep(1);
1749
1750 /* issue the second H2D Register FIS */
1751 tf.ctl &= ~ATA_SRST;
1752 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
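/* no timeout is passed here: the SRST-clearing FIS is simply issued
 * and flushed, and readiness is then checked by ata_wait_after_reset()
 * below.
 */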
1753
1754 /* wait for link to become ready */
1755 rc = ata_wait_after_reset(link, deadline, check_ready);
1756 if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1757 /*
1758 * Workaround for cases where link online status can't
1759 * be trusted. Treat device readiness timeout as link
1760 * offline.
1761 */
1762 ata_link_printk(link, KERN_INFO,
1763 "device not ready, treating as offline\n");
1764 *class = ATA_DEV_NONE;
1765 } else if (rc) {
1766 /* link occupied, -ENODEV too is an error */
1767 reason = "device not ready";
1768 goto fail;
1769 } else
1770 *class = ahci_dev_classify(ap);
1771
1772 DPRINTK("EXIT, class=%u\n", *class);
1773 return 0;
1774
1775 fail:
1776 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1777 return rc;
1778 }
1779
1780 static int ahci_check_ready(struct ata_link *link)
1781 {
1782 void __iomem *port_mmio = ahci_port_base(link->ap);
1783 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1784
1785 return ata_check_ready(status);
1786 }
1787
1788 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1789 unsigned long deadline)
1790 {
1791 int pmp = sata_srst_pmp(link);
1792
1793 DPRINTK("ENTER\n");
1794
1795 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1796 }
1797
1798 static int ahci_sb600_check_ready(struct ata_link *link)
1799 {
1800 void __iomem *port_mmio = ahci_port_base(link->ap);
1801 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1802 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1803
1804 /*
1805 * Due to a HW bug there is no need to check TFDATA if BAD PMP is
1806 * found; bailing out early saves the timeout delay.
1807 */
1808 if (irq_status & PORT_IRQ_BAD_PMP)
1809 return -EIO;
1810
1811 return ata_check_ready(status);
1812 }
1813
1814 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1815 unsigned long deadline)
1816 {
1817 struct ata_port *ap = link->ap;
1818 void __iomem *port_mmio = ahci_port_base(ap);
1819 int pmp = sata_srst_pmp(link);
1820 int rc;
1821 u32 irq_sts;
1822
1823 DPRINTK("ENTER\n");
1824
1825 rc = ahci_do_softreset(link, class, pmp, deadline,
1826 ahci_sb600_check_ready);
1827
1828 /*
1829 * Soft reset fails on some ATI chips with IPMS set when PMP
2830 * is enabled but a SATA HDD/ODD is connected to the SATA port;
2831 * retry the soft reset against PMP port 0.
1832 */
1833 if (rc == -EIO) {
1834 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1835 if (irq_sts & PORT_IRQ_BAD_PMP) {
1836 ata_link_printk(link, KERN_WARNING,
1837 "applying SB600 PMP SRST workaround "
1838 "and retrying\n");
1839 rc = ahci_do_softreset(link, class, 0, deadline,
1840 ahci_check_ready);
1841 }
1842 }
1843
1844 return rc;
1845 }
1846
1847 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1848 unsigned long deadline)
1849 {
1850 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1851 struct ata_port *ap = link->ap;
1852 struct ahci_port_priv *pp = ap->private_data;
1853 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1854 struct ata_taskfile tf;
1855 bool online;
1856 int rc;
1857
1858 DPRINTK("ENTER\n");
1859
1860 ahci_stop_engine(ap);
1861
1862 /* clear D2H reception area to properly wait for D2H FIS */
1863 ata_tf_init(link->device, &tf);
1864 tf.command = 0x80;
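/* 0x80 is ATA_BUSY; pre-loading it into the cached status byte marks
 * the stale FIS area as busy so leftover data isn't mistaken for a
 * freshly received D2H signature FIS.
 */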
1865 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1866
1867 rc = sata_link_hardreset(link, timing, deadline, &online,
1868 ahci_check_ready);
1869
1870 ahci_start_engine(ap);
1871
1872 if (online)
1873 *class = ahci_dev_classify(ap);
1874
1875 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1876 return rc;
1877 }
1878
1879 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1880 unsigned long deadline)
1881 {
1882 struct ata_port *ap = link->ap;
1883 bool online;
1884 int rc;
1885
1886 DPRINTK("ENTER\n");
1887
1888 ahci_stop_engine(ap);
1889
1890 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1891 deadline, &online, NULL);
1892
1893 ahci_start_engine(ap);
1894
1895 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1896
1897 /* vt8251 doesn't clear BSY on signature FIS reception,
1898 * request follow-up softreset.
1899 */
1900 return online ? -EAGAIN : rc;
1901 }
1902
1903 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1904 unsigned long deadline)
1905 {
1906 struct ata_port *ap = link->ap;
1907 struct ahci_port_priv *pp = ap->private_data;
1908 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1909 struct ata_taskfile tf;
1910 bool online;
1911 int rc;
1912
1913 ahci_stop_engine(ap);
1914
1915 /* clear D2H reception area to properly wait for D2H FIS */
1916 ata_tf_init(link->device, &tf);
1917 tf.command = 0x80;
1918 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1919
1920 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1921 deadline, &online, NULL);
1922
1923 ahci_start_engine(ap);
1924
1925 /* The pseudo configuration device on SIMG4726 attached to
1926 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1927 * hardreset if no device is attached to the first downstream
1928 * port && the pseudo device locks up on SRST w/ PMP==0. To
1929 * work around this, wait for !BSY only briefly. If BSY isn't
1930 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1931 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1932 *
1933 * Wait for two seconds. Devices attached to downstream port
1934 * which can't process the following IDENTIFY after this will
1935 * have to be reset again. For most cases, this should
1936 * suffice while making probing snappish enough.
1937 */
1938 if (online) {
1939 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1940 ahci_check_ready);
1941 if (rc)
1942 ahci_kick_engine(ap);
1943 }
1944 return rc;
1945 }
1946
1947 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1948 {
1949 struct ata_port *ap = link->ap;
1950 void __iomem *port_mmio = ahci_port_base(ap);
1951 u32 new_tmp, tmp;
1952
1953 ata_std_postreset(link, class);
1954
1955 /* Make sure port's ATAPI bit is set appropriately */
1956 new_tmp = tmp = readl(port_mmio + PORT_CMD);
1957 if (*class == ATA_DEV_ATAPI)
1958 new_tmp |= PORT_CMD_ATAPI;
1959 else
1960 new_tmp &= ~PORT_CMD_ATAPI;
1961 if (new_tmp != tmp) {
1962 writel(new_tmp, port_mmio + PORT_CMD);
1963 readl(port_mmio + PORT_CMD); /* flush */
1964 }
1965 }
1966
1967 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1968 {
1969 struct scatterlist *sg;
1970 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1971 unsigned int si;
1972
1973 VPRINTK("ENTER\n");
1974
1975 /*
1976 * Next, the S/G list.
1977 */
1978 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1979 dma_addr_t addr = sg_dma_address(sg);
1980 u32 sg_len = sg_dma_len(sg);
1981
1982 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1983 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1984 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1985 }
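/* flags_size carries a 0-based byte count (hence sg_len - 1), and the
 * DMA address is split into low/high dwords the same way as in
 * ahci_fill_cmd_slot().
 */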
1986
1987 return si;
1988 }
1989
1990 static void ahci_qc_prep(struct ata_queued_cmd *qc)
1991 {
1992 struct ata_port *ap = qc->ap;
1993 struct ahci_port_priv *pp = ap->private_data;
1994 int is_atapi = ata_is_atapi(qc->tf.protocol);
1995 void *cmd_tbl;
1996 u32 opts;
1997 const u32 cmd_fis_len = 5; /* five dwords */
1998 unsigned int n_elem;
1999
2000 /*
2001 * Fill in command table information. First, the header,
2002 * a SATA Register - Host to Device command FIS.
2003 */
2004 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2005
2006 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
2007 if (is_atapi) {
2008 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2009 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2010 }
2011
2012 n_elem = 0;
2013 if (qc->flags & ATA_QCFLAG_DMAMAP)
2014 n_elem = ahci_fill_sg(qc, cmd_tbl);
2015
2016 /*
2017 * Fill in command slot information.
2018 */
2019 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
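/* command header DW0: bits 4:0 hold the FIS length in dwords
 * (cmd_fis_len), bits 15:12 the PMP port number and bits 31:16 the
 * PRDT entry count; the ATAPI/write/prefetch flags OR'd in below
 * land in bits 5-7.
 */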
2020 if (qc->tf.flags & ATA_TFLAG_WRITE)
2021 opts |= AHCI_CMD_WRITE;
2022 if (is_atapi)
2023 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2024
2025 ahci_fill_cmd_slot(pp, qc->tag, opts);
2026 }
2027
2028 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
2029 {
2030 struct ahci_host_priv *hpriv = ap->host->private_data;
2031 struct ahci_port_priv *pp = ap->private_data;
2032 struct ata_eh_info *host_ehi = &ap->link.eh_info;
2033 struct ata_link *link = NULL;
2034 struct ata_queued_cmd *active_qc;
2035 struct ata_eh_info *active_ehi;
2036 u32 serror;
2037
2038 /* determine active link */
2039 ata_for_each_link(link, ap, EDGE)
2040 if (ata_link_active(link))
2041 break;
2042 if (!link)
2043 link = &ap->link;
2044
2045 active_qc = ata_qc_from_tag(ap, link->active_tag);
2046 active_ehi = &link->eh_info;
2047
2048 /* record irq stat */
2049 ata_ehi_clear_desc(host_ehi);
2050 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
2051
2052 /* AHCI needs SError cleared; otherwise, it might lock up */
2053 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
2054 ahci_scr_write(&ap->link, SCR_ERROR, serror);
2055 host_ehi->serror |= serror;
2056
2057 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
2058 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
2059 irq_stat &= ~PORT_IRQ_IF_ERR;
2060
2061 if (irq_stat & PORT_IRQ_TF_ERR) {
2062 /* If qc is active, charge it; otherwise, the active
2063 * link. There's no active qc on NCQ errors. It will
2064 * be determined by EH by reading log page 10h.
2065 */
2066 if (active_qc)
2067 active_qc->err_mask |= AC_ERR_DEV;
2068 else
2069 active_ehi->err_mask |= AC_ERR_DEV;
2070
2071 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2072 host_ehi->serror &= ~SERR_INTERNAL;
2073 }
2074
2075 if (irq_stat & PORT_IRQ_UNK_FIS) {
2076 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2077
2078 active_ehi->err_mask |= AC_ERR_HSM;
2079 active_ehi->action |= ATA_EH_RESET;
2080 ata_ehi_push_desc(active_ehi,
2081 "unknown FIS %08x %08x %08x %08x" ,
2082 unk[0], unk[1], unk[2], unk[3]);
2083 }
2084
2085 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2086 active_ehi->err_mask |= AC_ERR_HSM;
2087 active_ehi->action |= ATA_EH_RESET;
2088 ata_ehi_push_desc(active_ehi, "incorrect PMP");
2089 }
2090
2091 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2092 host_ehi->err_mask |= AC_ERR_HOST_BUS;
2093 host_ehi->action |= ATA_EH_RESET;
2094 ata_ehi_push_desc(host_ehi, "host bus error");
2095 }
2096
2097 if (irq_stat & PORT_IRQ_IF_ERR) {
2098 host_ehi->err_mask |= AC_ERR_ATA_BUS;
2099 host_ehi->action |= ATA_EH_RESET;
2100 ata_ehi_push_desc(host_ehi, "interface fatal error");
2101 }
2102
2103 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2104 ata_ehi_hotplugged(host_ehi);
2105 ata_ehi_push_desc(host_ehi, "%s",
2106 irq_stat & PORT_IRQ_CONNECT ?
2107 "connection status changed" : "PHY RDY changed");
2108 }
2109
2110 /* okay, let's hand over to EH */
2111
2112 if (irq_stat & PORT_IRQ_FREEZE)
2113 ata_port_freeze(ap);
2114 else
2115 ata_port_abort(ap);
2116 }
2117
2118 static void ahci_port_intr(struct ata_port *ap)
2119 {
2120 void __iomem *port_mmio = ahci_port_base(ap);
2121 struct ata_eh_info *ehi = &ap->link.eh_info;
2122 struct ahci_port_priv *pp = ap->private_data;
2123 struct ahci_host_priv *hpriv = ap->host->private_data;
2124 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2125 u32 status, qc_active;
2126 int rc;
2127
2128 status = readl(port_mmio + PORT_IRQ_STAT);
2129 writel(status, port_mmio + PORT_IRQ_STAT);
2130
2131 /* ignore BAD_PMP while resetting */
2132 if (unlikely(resetting))
2133 status &= ~PORT_IRQ_BAD_PMP;
2134
2135 /* If we are getting PhyRdy, this is
2136 * just a power state change; clear the bit
2137 * here, plus the PhyRdy/Comm Wake bits
2138 * from SError
2139 */
2140 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2141 (status & PORT_IRQ_PHYRDY)) {
2142 status &= ~PORT_IRQ_PHYRDY;
2143 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2144 }
2145
2146 if (unlikely(status & PORT_IRQ_ERROR)) {
2147 ahci_error_intr(ap, status);
2148 return;
2149 }
2150
2151 if (status & PORT_IRQ_SDB_FIS) {
2152 /* If SNotification is available, leave notification
2153 * handling to sata_async_notification(). If not,
2154 * emulate it by snooping SDB FIS RX area.
2155 *
2156 * Snooping FIS RX area is probably cheaper than
2157 * poking SNotification but some controllers which
2158 * implement SNotification, ICH9 for example, don't
2159 * store AN SDB FIS into receive area.
2160 */
2161 if (hpriv->cap & HOST_CAP_SNTF)
2162 sata_async_notification(ap);
2163 else {
2164 /* If the 'N' bit in word 0 of the FIS is set,
2165 * we just received asynchronous notification.
2166 * Tell libata about it.
2167 */
2168 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2169 u32 f0 = le32_to_cpu(f[0]);
2170
2171 if (f0 & (1 << 15))
2172 sata_async_notification(ap);
2173 }
2174 }
2175
2176 /* pp->active_link is valid iff any command is in flight */
2177 if (ap->qc_active && pp->active_link->sactive)
2178 qc_active = readl(port_mmio + PORT_SCR_ACT);
2179 else
2180 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
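/* NCQ commands signal completion by clearing bits in PxSACT, while
 * non-NCQ commands clear their PxCI bit, hence the choice of register
 * above.
 */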
2181
2182 rc = ata_qc_complete_multiple(ap, qc_active);
2183
2184 /* while resetting, invalid completions are expected */
2185 if (unlikely(rc < 0 && !resetting)) {
2186 ehi->err_mask |= AC_ERR_HSM;
2187 ehi->action |= ATA_EH_RESET;
2188 ata_port_freeze(ap);
2189 }
2190 }
2191
2192 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2193 {
2194 struct ata_host *host = dev_instance;
2195 struct ahci_host_priv *hpriv;
2196 unsigned int i, handled = 0;
2197 void __iomem *mmio;
2198 u32 irq_stat, irq_masked;
2199
2200 VPRINTK("ENTER\n");
2201
2202 hpriv = host->private_data;
2203 mmio = host->iomap[AHCI_PCI_BAR];
2204
2205 /* sigh. 0xffffffff is a valid return from h/w */
2206 irq_stat = readl(mmio + HOST_IRQ_STAT);
2207 if (!irq_stat)
2208 return IRQ_NONE;
2209
2210 irq_masked = irq_stat & hpriv->port_map;
2211
2212 spin_lock(&host->lock);
2213
2214 for (i = 0; i < host->n_ports; i++) {
2215 struct ata_port *ap;
2216
2217 if (!(irq_masked & (1 << i)))
2218 continue;
2219
2220 ap = host->ports[i];
2221 if (ap) {
2222 ahci_port_intr(ap);
2223 VPRINTK("port %u\n", i);
2224 } else {
2225 VPRINTK("port %u (no irq)\n", i);
2226 if (ata_ratelimit())
2227 dev_printk(KERN_WARNING, host->dev,
2228 "interrupt on disabled port %u\n", i);
2229 }
2230
2231 handled = 1;
2232 }
2233
2234 /* HOST_IRQ_STAT behaves as level triggered latch meaning that
2235 * it should be cleared after all the port events are cleared;
2236 * otherwise, it will raise a spurious interrupt after each
2237 * valid one. Please read section 10.6.2 of ahci 1.1 for more
2238 * information.
2239 *
2240 * Also, use the unmasked value to clear interrupt as spurious
2241 * pending event on a dummy port might cause screaming IRQ.
2242 */
2243 writel(irq_stat, mmio + HOST_IRQ_STAT);
2244
2245 spin_unlock(&host->lock);
2246
2247 VPRINTK("EXIT\n");
2248
2249 return IRQ_RETVAL(handled);
2250 }
2251
2252 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2253 {
2254 struct ata_port *ap = qc->ap;
2255 void __iomem *port_mmio = ahci_port_base(ap);
2256 struct ahci_port_priv *pp = ap->private_data;
2257
2258 /* Keep track of the currently active link. It will be used
2259 * in completion path to determine whether NCQ phase is in
2260 * progress.
2261 */
2262 pp->active_link = qc->dev->link;
2263
2264 if (qc->tf.protocol == ATA_PROT_NCQ)
2265 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2266 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
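/* for NCQ the tag bit must be set in PxSACT before PxCI; non-NCQ
 * commands only need the PxCI write.
 */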
2267
2268 ahci_sw_activity(qc->dev->link);
2269
2270 return 0;
2271 }
2272
2273 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2274 {
2275 struct ahci_port_priv *pp = qc->ap->private_data;
2276 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2277
2278 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2279 return true;
2280 }
2281
2282 static void ahci_freeze(struct ata_port *ap)
2283 {
2284 void __iomem *port_mmio = ahci_port_base(ap);
2285
2286 /* turn IRQ off */
2287 writel(0, port_mmio + PORT_IRQ_MASK);
2288 }
2289
2290 static void ahci_thaw(struct ata_port *ap)
2291 {
2292 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2293 void __iomem *port_mmio = ahci_port_base(ap);
2294 u32 tmp;
2295 struct ahci_port_priv *pp = ap->private_data;
2296
2297 /* clear IRQ */
2298 tmp = readl(port_mmio + PORT_IRQ_STAT);
2299 writel(tmp, port_mmio + PORT_IRQ_STAT);
2300 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
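/* HOST_IRQ_STAT latches the per-port status (see ahci_interrupt()),
 * so the host-level bit for this port is acked here as well before
 * the mask is restored.
 */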
2301
2302 /* turn IRQ back on */
2303 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2304 }
2305
2306 static void ahci_error_handler(struct ata_port *ap)
2307 {
2308 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2309 /* restart engine */
2310 ahci_stop_engine(ap);
2311 ahci_start_engine(ap);
2312 }
2313
2314 sata_pmp_error_handler(ap);
2315 }
2316
2317 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2318 {
2319 struct ata_port *ap = qc->ap;
2320
2321 /* make DMA engine forget about the failed command */
2322 if (qc->flags & ATA_QCFLAG_FAILED)
2323 ahci_kick_engine(ap);
2324 }
2325
2326 static void ahci_pmp_attach(struct ata_port *ap)
2327 {
2328 void __iomem *port_mmio = ahci_port_base(ap);
2329 struct ahci_port_priv *pp = ap->private_data;
2330 u32 cmd;
2331
2332 cmd = readl(port_mmio + PORT_CMD);
2333 cmd |= PORT_CMD_PMP;
2334 writel(cmd, port_mmio + PORT_CMD);
2335
2336 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2337 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2338 }
2339
2340 static void ahci_pmp_detach(struct ata_port *ap)
2341 {
2342 void __iomem *port_mmio = ahci_port_base(ap);
2343 struct ahci_port_priv *pp = ap->private_data;
2344 u32 cmd;
2345
2346 cmd = readl(port_mmio + PORT_CMD);
2347 cmd &= ~PORT_CMD_PMP;
2348 writel(cmd, port_mmio + PORT_CMD);
2349
2350 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2351 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2352 }
2353
2354 static int ahci_port_resume(struct ata_port *ap)
2355 {
2356 ahci_power_up(ap);
2357 ahci_start_port(ap);
2358
2359 if (sata_pmp_attached(ap))
2360 ahci_pmp_attach(ap);
2361 else
2362 ahci_pmp_detach(ap);
2363
2364 return 0;
2365 }
2366
2367 #ifdef CONFIG_PM
2368 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2369 {
2370 const char *emsg = NULL;
2371 int rc;
2372
2373 rc = ahci_deinit_port(ap, &emsg);
2374 if (rc == 0)
2375 ahci_power_down(ap);
2376 else {
2377 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2378 ahci_start_port(ap);
2379 }
2380
2381 return rc;
2382 }
2383
2384 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2385 {
2386 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2387 struct ahci_host_priv *hpriv = host->private_data;
2388 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2389 u32 ctl;
2390
2391 if (mesg.event & PM_EVENT_SUSPEND &&
2392 hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
2393 dev_printk(KERN_ERR, &pdev->dev,
2394 "BIOS update required for suspend/resume\n");
2395 return -EIO;
2396 }
2397
2398 if (mesg.event & PM_EVENT_SLEEP) {
2399 /* AHCI spec rev1.1 section 8.3.3:
2400 * Software must disable interrupts prior to requesting a
2401 * transition of the HBA to D3 state.
2402 */
2403 ctl = readl(mmio + HOST_CTL);
2404 ctl &= ~HOST_IRQ_EN;
2405 writel(ctl, mmio + HOST_CTL);
2406 readl(mmio + HOST_CTL); /* flush */
2407 }
2408
2409 return ata_pci_device_suspend(pdev, mesg);
2410 }
2411
2412 static int ahci_pci_device_resume(struct pci_dev *pdev)
2413 {
2414 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2415 int rc;
2416
2417 rc = ata_pci_device_do_resume(pdev);
2418 if (rc)
2419 return rc;
2420
2421 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2422 rc = ahci_reset_controller(host);
2423 if (rc)
2424 return rc;
2425
2426 ahci_init_controller(host);
2427 }
2428
2429 ata_host_resume(host);
2430
2431 return 0;
2432 }
2433 #endif
2434
2435 static int ahci_port_start(struct ata_port *ap)
2436 {
2437 struct device *dev = ap->host->dev;
2438 struct ahci_port_priv *pp;
2439 void *mem;
2440 dma_addr_t mem_dma;
2441
2442 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2443 if (!pp)
2444 return -ENOMEM;
2445
2446 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2447 GFP_KERNEL);
2448 if (!mem)
2449 return -ENOMEM;
2450 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
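/* dmam_alloc_coherent() is device-managed, so this region is released
 * automatically when the host goes away; ahci_port_stop() doesn't
 * need to free it explicitly.
 */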
2451
2452 /*
2453 * First item in chunk of DMA memory: 32-slot command table,
2454 * 32 bytes each in size
2455 */
2456 pp->cmd_slot = mem;
2457 pp->cmd_slot_dma = mem_dma;
2458
2459 mem += AHCI_CMD_SLOT_SZ;
2460 mem_dma += AHCI_CMD_SLOT_SZ;
2461
2462 /*
2463 * Second item: Received-FIS area
2464 */
2465 pp->rx_fis = mem;
2466 pp->rx_fis_dma = mem_dma;
2467
2468 mem += AHCI_RX_FIS_SZ;
2469 mem_dma += AHCI_RX_FIS_SZ;
2470
2471 /*
2472 * Third item: per-tag command tables, each holding one command
2473 * FIS and its scatter-gather table
2474 */
2475 pp->cmd_tbl = mem;
2476 pp->cmd_tbl_dma = mem_dma;
2477
2478 /*
2479 * Save off initial list of interrupts to be enabled.
2480 * This could be changed later
2481 */
2482 pp->intr_mask = DEF_PORT_IRQ;
2483
2484 ap->private_data = pp;
2485
2486 /* engage engines, captain */
2487 return ahci_port_resume(ap);
2488 }
2489
2490 static void ahci_port_stop(struct ata_port *ap)
2491 {
2492 const char *emsg = NULL;
2493 int rc;
2494
2495 /* de-initialize port */
2496 rc = ahci_deinit_port(ap, &emsg);
2497 if (rc)
2498 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2499 }
2500
2501 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2502 {
2503 int rc;
2504
2505 if (using_dac &&
2506 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2507 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2508 if (rc) {
2509 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2510 if (rc) {
2511 dev_printk(KERN_ERR, &pdev->dev,
2512 "64-bit DMA enable failed\n");
2513 return rc;
2514 }
2515 }
2516 } else {
2517 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2518 if (rc) {
2519 dev_printk(KERN_ERR, &pdev->dev,
2520 "32-bit DMA enable failed\n");
2521 return rc;
2522 }
2523 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2524 if (rc) {
2525 dev_printk(KERN_ERR, &pdev->dev,
2526 "32-bit consistent DMA enable failed\n");
2527 return rc;
2528 }
2529 }
2530 return 0;
2531 }
2532
2533 static void ahci_print_info(struct ata_host *host)
2534 {
2535 struct ahci_host_priv *hpriv = host->private_data;
2536 struct pci_dev *pdev = to_pci_dev(host->dev);
2537 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2538 u32 vers, cap, impl, speed;
2539 const char *speed_s;
2540 u16 cc;
2541 const char *scc_s;
2542
2543 vers = readl(mmio + HOST_VERSION);
2544 cap = hpriv->cap;
2545 impl = hpriv->port_map;
2546
2547 speed = (cap >> 20) & 0xf;
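/* CAP bits 23:20 (ISS) encode interface speed support:
 * 1 = Gen 1 (1.5 Gbps), 2 = Gen 2 (3 Gbps), 3 = Gen 3 (6 Gbps).
 */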
2548 if (speed == 1)
2549 speed_s = "1.5";
2550 else if (speed == 2)
2551 speed_s = "3";
2552 else if (speed == 3)
2553 speed_s = "6";
2554 else
2555 speed_s = "?";
2556
2557 pci_read_config_word(pdev, 0x0a, &cc);
2558 if (cc == PCI_CLASS_STORAGE_IDE)
2559 scc_s = "IDE";
2560 else if (cc == PCI_CLASS_STORAGE_SATA)
2561 scc_s = "SATA";
2562 else if (cc == PCI_CLASS_STORAGE_RAID)
2563 scc_s = "RAID";
2564 else
2565 scc_s = "unknown";
2566
2567 dev_printk(KERN_INFO, &pdev->dev,
2568 "AHCI %02x%02x.%02x%02x "
2569 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2570 ,
2571
2572 (vers >> 24) & 0xff,
2573 (vers >> 16) & 0xff,
2574 (vers >> 8) & 0xff,
2575 vers & 0xff,
2576
2577 ((cap >> 8) & 0x1f) + 1,
2578 (cap & 0x1f) + 1,
2579 speed_s,
2580 impl,
2581 scc_s);
2582
2583 dev_printk(KERN_INFO, &pdev->dev,
2584 "flags: "
2585 "%s%s%s%s%s%s%s"
2586 "%s%s%s%s%s%s%s"
2587 "%s\n"
2588 ,
2589
2590 cap & (1 << 31) ? "64bit " : "",
2591 cap & (1 << 30) ? "ncq " : "",
2592 cap & (1 << 29) ? "sntf " : "",
2593 cap & (1 << 28) ? "ilck " : "",
2594 cap & (1 << 27) ? "stag " : "",
2595 cap & (1 << 26) ? "pm " : "",
2596 cap & (1 << 25) ? "led " : "",
2597
2598 cap & (1 << 24) ? "clo " : "",
2599 cap & (1 << 19) ? "nz " : "",
2600 cap & (1 << 18) ? "only " : "",
2601 cap & (1 << 17) ? "pmp " : "",
2602 cap & (1 << 15) ? "pio " : "",
2603 cap & (1 << 14) ? "slum " : "",
2604 cap & (1 << 13) ? "part " : "",
2605 cap & (1 << 6) ? "ems ": ""
2606 );
2607 }
2608
2609 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2610 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2611 * support PMP and the 4726 either directly exports the device
2612 * attached to the first downstream port or acts as a hardware storage
2613 * controller and emulates a single ATA device (can be RAID 0/1 or some
2614 * other configuration).
2615 *
2616 * When there's no device attached to the first downstream port of the
2617 * 4726, "Config Disk" appears, which is a pseudo ATA device to
2618 * configure the 4726. However, ATA emulation of the device is very
2619 * lame. It doesn't send signature D2H Reg FIS after the initial
2620 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2621 *
2622 * The following function works around the problem by always using
2623 * hardreset on the port and not depending on receiving signature FIS
2624 * afterward. If signature FIS isn't received soon, ATA class is
2625 * assumed without follow-up softreset.
2626 */
2627 static void ahci_p5wdh_workaround(struct ata_host *host)
2628 {
2629 static struct dmi_system_id sysids[] = {
2630 {
2631 .ident = "P5W DH Deluxe",
2632 .matches = {
2633 DMI_MATCH(DMI_SYS_VENDOR,
2634 "ASUSTEK COMPUTER INC"),
2635 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2636 },
2637 },
2638 { }
2639 };
2640 struct pci_dev *pdev = to_pci_dev(host->dev);
2641
2642 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2643 dmi_check_system(sysids)) {
2644 struct ata_port *ap = host->ports[1];
2645
2646 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2647 "Deluxe on-board SIMG4726 workaround\n");
2648
2649 ap->ops = &ahci_p5wdh_ops;
2650 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2651 }
2652 }
2653
2654 /* only some SB600 ahci controllers can do 64bit DMA */
2655 static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
2656 {
2657 static const struct dmi_system_id sysids[] = {
2658 /*
2659 * The oldest version known to be broken is 0901 and
2660 * working is 1501 which was released on 2007-10-26.
2661 * Enable 64bit DMA on 1501 and anything newer.
2662 *
2663 * Please read bko#9412 for more info.
2664 */
2665 {
2666 .ident = "ASUS M2A-VM",
2667 .matches = {
2668 DMI_MATCH(DMI_BOARD_VENDOR,
2669 "ASUSTeK Computer INC."),
2670 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2671 },
2672 .driver_data = "20071026", /* yyyymmdd */
2673 },
2674 { }
2675 };
2676 const struct dmi_system_id *match;
2677 int year, month, date;
2678 char buf[9];
2679
2680 match = dmi_first_match(sysids);
2681 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
2682 !match)
2683 return false;
2684
2685 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2686 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
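/* both strings are zero-padded yyyymmdd, so a plain strcmp() gives a
 * correct chronological comparison against the cutoff date.
 */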
2687
2688 if (strcmp(buf, match->driver_data) >= 0) {
2689 dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
2690 match->ident);
2691 return true;
2692 } else {
2693 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
2694 "forcing 32bit DMA, update BIOS\n", match->ident);
2695 return false;
2696 }
2697 }
2698
2699 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
2700 {
2701 static const struct dmi_system_id broken_systems[] = {
2702 {
2703 .ident = "HP Compaq nx6310",
2704 .matches = {
2705 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2706 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
2707 },
2708 /* PCI slot number of the controller */
2709 .driver_data = (void *)0x1FUL,
2710 },
2711 {
2712 .ident = "HP Compaq 6720s",
2713 .matches = {
2714 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2715 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
2716 },
2717 /* PCI slot number of the controller */
2718 .driver_data = (void *)0x1FUL,
2719 },
2720
2721 { } /* terminate list */
2722 };
2723 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
2724
2725 if (dmi) {
2726 unsigned long slot = (unsigned long)dmi->driver_data;
2727 /* apply the quirk only to on-board controllers */
2728 return slot == PCI_SLOT(pdev->devfn);
2729 }
2730
2731 return false;
2732 }
2733
2734 static bool ahci_broken_suspend(struct pci_dev *pdev)
2735 {
2736 static const struct dmi_system_id sysids[] = {
2737 /*
2738 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
2739 * to the harddisk doesn't become online after
2740 * resuming from STR. Warn and fail suspend.
2741 */
2742 {
2743 .ident = "dv4",
2744 .matches = {
2745 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2746 DMI_MATCH(DMI_PRODUCT_NAME,
2747 "HP Pavilion dv4 Notebook PC"),
2748 },
2749 .driver_data = "F.30", /* cutoff BIOS version */
2750 },
2751 {
2752 .ident = "dv5",
2753 .matches = {
2754 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2755 DMI_MATCH(DMI_PRODUCT_NAME,
2756 "HP Pavilion dv5 Notebook PC"),
2757 },
2758 .driver_data = "F.16", /* cutoff BIOS version */
2759 },
2760 {
2761 .ident = "dv6",
2762 .matches = {
2763 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2764 DMI_MATCH(DMI_PRODUCT_NAME,
2765 "HP Pavilion dv6 Notebook PC"),
2766 },
2767 .driver_data = "F.21", /* cutoff BIOS version */
2768 },
2769 {
2770 .ident = "HDX18",
2771 .matches = {
2772 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2773 DMI_MATCH(DMI_PRODUCT_NAME,
2774 "HP HDX18 Notebook PC"),
2775 },
2776 .driver_data = "F.23", /* cutoff BIOS version */
2777 },
2778 { } /* terminate list */
2779 };
2780 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2781 const char *ver;
2782
2783 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
2784 return false;
2785
2786 ver = dmi_get_system_info(DMI_BIOS_VERSION);
2787
2788 return !ver || strcmp(ver, dmi->driver_data) < 0;
2789 }
2790
2791 static bool ahci_broken_online(struct pci_dev *pdev)
2792 {
2793 #define ENCODE_BUSDEVFN(bus, slot, func) \
2794 (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
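/* packs the PCI location into driver_data: bus number in bits 15:8,
 * devfn in the low byte; decoded again at the bottom of this function.
 */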
2795 static const struct dmi_system_id sysids[] = {
2796 /*
2797 * There are several gigabyte boards which use
2798 * SIMG5723s configured as hardware RAID. Certain
2799 * 5723 firmware revisions shipped there keep the link
2800 * online but fail to answer properly to SRST or
2801 * IDENTIFY when no device is attached downstream
2802 * causing libata to retry quite a few times leading
2803 * to excessive detection delay.
2804 *
2805 * As these firmwares respond to the second reset try
2806 * with invalid device signature, considering unknown
2807 * sig as offline works around the problem acceptably.
2808 */
2809 {
2810 .ident = "EP45-DQ6",
2811 .matches = {
2812 DMI_MATCH(DMI_BOARD_VENDOR,
2813 "Gigabyte Technology Co., Ltd."),
2814 DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
2815 },
2816 .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
2817 },
2818 {
2819 .ident = "EP45-DS5",
2820 .matches = {
2821 DMI_MATCH(DMI_BOARD_VENDOR,
2822 "Gigabyte Technology Co., Ltd."),
2823 DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
2824 },
2825 .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
2826 },
2827 { } /* terminate list */
2828 };
2829 #undef ENCODE_BUSDEVFN
2830 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2831 unsigned int val;
2832
2833 if (!dmi)
2834 return false;
2835
2836 val = (unsigned long)dmi->driver_data;
2837
2838 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
2839 }
2840
2841 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2842 {
2843 static int printed_version;
2844 unsigned int board_id = ent->driver_data;
2845 struct ata_port_info pi = ahci_port_info[board_id];
2846 const struct ata_port_info *ppi[] = { &pi, NULL };
2847 struct device *dev = &pdev->dev;
2848 struct ahci_host_priv *hpriv;
2849 struct ata_host *host;
2850 int n_ports, i, rc;
2851
2852 VPRINTK("ENTER\n");
2853
2854 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2855
2856 if (!printed_version++)
2857 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2858
2859 /* The AHCI driver can only drive the SATA ports; the PATA driver
2860 can drive them all, so if both drivers are selected make sure
2861 AHCI stays out of the way */
2862 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
2863 return -ENODEV;
2864
2865 /* acquire resources */
2866 rc = pcim_enable_device(pdev);
2867 if (rc)
2868 return rc;
2869
2870 /* AHCI controllers often implement SFF compatible interface.
2871 * Grab all PCI BARs just in case.
2872 */
2873 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
2874 if (rc == -EBUSY)
2875 pcim_pin_device(pdev);
2876 if (rc)
2877 return rc;
2878
2879 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2880 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
2881 u8 map;
2882
2883 /* ICH6s share the same PCI ID for both piix and ahci
2884 * modes. Enabling ahci mode while MAP indicates
2885 * combined mode is a bad idea. Yield to ata_piix.
2886 */
2887 pci_read_config_byte(pdev, ICH_MAP, &map);
2888 if (map & 0x3) {
2889 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
2890 "combined mode, can't enable AHCI mode\n");
2891 return -ENODEV;
2892 }
2893 }
2894
2895 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2896 if (!hpriv)
2897 return -ENOMEM;
2898 hpriv->flags |= (unsigned long)pi.private_data;
2899
2900 /* MCP65 revision A1 and A2 can't do MSI */
2901 if (board_id == board_ahci_mcp65 &&
2902 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
2903 hpriv->flags |= AHCI_HFLAG_NO_MSI;
2904
2905 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
2906 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
2907 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
2908
2909 /* only some SB600s can do 64bit DMA */
2910 if (ahci_sb600_enable_64bit(pdev))
2911 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
2912
2913 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
2914 pci_intx(pdev, 1);
2915
2916 /* save initial config */
2917 ahci_save_initial_config(pdev, hpriv);
2918
2919 /* prepare host */
2920 if (hpriv->cap & HOST_CAP_NCQ)
2921 pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
2922
2923 if (hpriv->cap & HOST_CAP_PMP)
2924 pi.flags |= ATA_FLAG_PMP;
2925
2926 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
2927 u8 messages;
2928 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
2929 u32 em_loc = readl(mmio + HOST_EM_LOC);
2930 u32 em_ctl = readl(mmio + HOST_EM_CTL);
2931
2932 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2933
2934 /* we only support LED message type right now */
2935 if ((messages & 0x01) && (ahci_em_messages == 1)) {
2936 /* store em_loc */
2937 hpriv->em_loc = ((em_loc >> 16) * 4);
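/* the upper half of HOST_EM_LOC is the message buffer offset in
 * dwords, hence the multiply by 4 to get a byte offset.
 */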
2938 pi.flags |= ATA_FLAG_EM;
2939 if (!(em_ctl & EM_CTL_ALHD))
2940 pi.flags |= ATA_FLAG_SW_ACTIVITY;
2941 }
2942 }
2943
2944 if (ahci_broken_system_poweroff(pdev)) {
2945 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
2946 dev_info(&pdev->dev,
2947 "quirky BIOS, skipping spindown on poweroff\n");
2948 }
2949
2950 if (ahci_broken_suspend(pdev)) {
2951 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
2952 dev_printk(KERN_WARNING, &pdev->dev,
2953 "BIOS update required for suspend/resume\n");
2954 }
2955
2956 if (ahci_broken_online(pdev)) {
2957 hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
2958 dev_info(&pdev->dev,
2959 "online status unreliable, applying workaround\n");
2960 }
2961
2962 /* CAP.NP sometimes indicates the index of the last enabled
2963 * port and at other times that of the last possible port, so
2964 * determining the maximum port number requires looking at
2965 * both CAP.NP and port_map.
2966 */
2967 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
2968
2969 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2970 if (!host)
2971 return -ENOMEM;
2972 host->iomap = pcim_iomap_table(pdev);
2973 host->private_data = hpriv;
2974
2975 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
2976 host->flags |= ATA_HOST_PARALLEL_SCAN;
2977 else
2978 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
2979
2980 if (pi.flags & ATA_FLAG_EM)
2981 ahci_reset_em(host);
2982
2983 for (i = 0; i < host->n_ports; i++) {
2984 struct ata_port *ap = host->ports[i];
2985
2986 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
2987 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
2988 0x100 + ap->port_no * 0x80, "port");
2989
2990 /* set initial link pm policy */
2991 ap->pm_policy = NOT_AVAILABLE;
2992
2993 /* set enclosure management message type */
2994 if (ap->flags & ATA_FLAG_EM)
2995 ap->em_message_type = ahci_em_messages;
2996
2997
2998 /* disabled/not-implemented port */
2999 if (!(hpriv->port_map & (1 << i)))
3000 ap->ops = &ata_dummy_port_ops;
3001 }
3002
3003 /* apply workaround for ASUS P5W DH Deluxe mainboard */
3004 ahci_p5wdh_workaround(host);
3005
3006 /* initialize adapter */
3007 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3008 if (rc)
3009 return rc;
3010
3011 rc = ahci_reset_controller(host);
3012 if (rc)
3013 return rc;
3014
3015 ahci_init_controller(host);
3016 ahci_print_info(host);
3017
3018 pci_set_master(pdev);
3019 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
3020 &ahci_sht);
3021 }
3022
3023 static int __init ahci_init(void)
3024 {
3025 return pci_register_driver(&ahci_pci_driver);
3026 }
3027
3028 static void __exit ahci_exit(void)
3029 {
3030 pci_unregister_driver(&ahci_pci_driver);
3031 }
3032
3033
3034 MODULE_AUTHOR("Jeff Garzik");
3035 MODULE_DESCRIPTION("AHCI SATA low-level driver");
3036 MODULE_LICENSE("GPL");
3037 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
3038 MODULE_VERSION(DRV_VERSION);
3039
3040 module_init(ahci_init);
3041 module_exit(ahci_exit);