1 /*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
5 * Copyright 2005: EMC Corporation, all rights reserved.
6 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 *
8 * Originally written by Brett Russ.
9 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
10 *
11 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; version 2 of the License.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 */
27
28 /*
29 * sata_mv TODO list:
30 *
31 * --> Develop a low-power-consumption strategy, and implement it.
32 *
33 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
34 *
35 * --> [Experiment, Marvell value added] Is it possible to use target
36 * mode to cross-connect two Linux boxes with Marvell cards? If so,
37 * creating LibATA target mode support would be very interesting.
38 *
39 * Target mode, for those without docs, is the ability to directly
40 * connect two SATA ports.
41 */
42
43 /*
44 * 80x1-B2 errata PCI#11:
45 *
46 * Users of the 6041/6081 Rev.B2 chips (current is C0)
47 * should be careful to insert those cards only onto PCI-X bus #0,
48 * and only in device slots 0..7, not higher. The chips may not
49 * work correctly otherwise (note: this is a pretty rare condition).
50 */
51
52 #include <linux/kernel.h>
53 #include <linux/module.h>
54 #include <linux/pci.h>
55 #include <linux/init.h>
56 #include <linux/blkdev.h>
57 #include <linux/delay.h>
58 #include <linux/interrupt.h>
59 #include <linux/dmapool.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/device.h>
62 #include <linux/platform_device.h>
63 #include <linux/ata_platform.h>
64 #include <linux/mbus.h>
65 #include <linux/bitops.h>
66 #include <scsi/scsi_host.h>
67 #include <scsi/scsi_cmnd.h>
68 #include <scsi/scsi_device.h>
69 #include <linux/libata.h>
70
71 #define DRV_NAME "sata_mv"
72 #define DRV_VERSION "1.28"
73
74 /*
75 * module options
76 */
77
78 static int msi;
79 #ifdef CONFIG_PCI
80 module_param(msi, int, S_IRUGO);
81 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
82 #endif
83
84 static int irq_coalescing_io_count;
85 module_param(irq_coalescing_io_count, int, S_IRUGO);
86 MODULE_PARM_DESC(irq_coalescing_io_count,
87 "IRQ coalescing I/O count threshold (0..255)");
88
89 static int irq_coalescing_usecs;
90 module_param(irq_coalescing_usecs, int, S_IRUGO);
91 MODULE_PARM_DESC(irq_coalescing_usecs,
92 "IRQ coalescing time threshold in usecs");
93
94 enum {
95 	/* BARs are enumerated in terms of pci_resource_start() */
96 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
97 MV_IO_BAR = 2, /* offset 0x18: IO space */
98 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
99
100 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
101 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
102
103 	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
104 COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */
105 MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
106 MAX_COAL_IO_COUNT = 255, /* completed I/O count */
107
108 MV_PCI_REG_BASE = 0,
109
110 /*
111 * Per-chip ("all ports") interrupt coalescing feature.
112 * This is only for GEN_II / GEN_IIE hardware.
113 *
114 * Coalescing defers the interrupt until either the IO_THRESHOLD
115 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
116 */
117 COAL_REG_BASE = 0x18000,
118 IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
119 ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */
120
121 IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
122 IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
123
124 /*
125 * Registers for the (unused here) transaction coalescing feature:
126 */
127 TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
128 TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),
129
130 SATAHC0_REG_BASE = 0x20000,
131 FLASH_CTL = 0x1046c,
132 GPIO_PORT_CTL = 0x104f0,
133 RESET_CFG = 0x180d8,
134
135 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
136 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
137 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
138 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
139
140 MV_MAX_Q_DEPTH = 32,
141 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
142
143 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
144 * CRPB needs alignment on a 256B boundary. Size == 256B
145 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
146 */
147 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
148 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
149 MV_MAX_SG_CT = 256,
150 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
151
152 /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
153 MV_PORT_HC_SHIFT = 2,
154 MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
155 /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
156 MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
157
158 /* Host Flags */
159 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
160
161 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
162 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
163
164 MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
165
166 MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
167 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
168
169 MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,
170
171 CRQB_FLAG_READ = (1 << 0),
172 CRQB_TAG_SHIFT = 1,
173 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
174 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
175 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
176 CRQB_CMD_ADDR_SHIFT = 8,
177 CRQB_CMD_CS = (0x2 << 11),
178 CRQB_CMD_LAST = (1 << 15),
179
180 CRPB_FLAG_STATUS_SHIFT = 8,
181 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
182 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
183
184 EPRD_FLAG_END_OF_TBL = (1 << 31),
185
186 /* PCI interface registers */
187
188 MV_PCI_COMMAND = 0xc00,
189 MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */
190 MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
191
192 PCI_MAIN_CMD_STS = 0xd30,
193 STOP_PCI_MASTER = (1 << 2),
194 PCI_MASTER_EMPTY = (1 << 3),
195 GLOB_SFT_RST = (1 << 4),
196
197 MV_PCI_MODE = 0xd00,
198 MV_PCI_MODE_MASK = 0x30,
199
200 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
201 MV_PCI_DISC_TIMER = 0xd04,
202 MV_PCI_MSI_TRIGGER = 0xc38,
203 MV_PCI_SERR_MASK = 0xc28,
204 MV_PCI_XBAR_TMOUT = 0x1d04,
205 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
206 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
207 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
208 MV_PCI_ERR_COMMAND = 0x1d50,
209
210 PCI_IRQ_CAUSE = 0x1d58,
211 PCI_IRQ_MASK = 0x1d5c,
212 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
213
214 PCIE_IRQ_CAUSE = 0x1900,
215 PCIE_IRQ_MASK = 0x1910,
216 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
217
218 	/* Host Controller Main Interrupt Cause/Mask registers (one per chip) */
219 PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
220 PCI_HC_MAIN_IRQ_MASK = 0x1d64,
221 SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
222 SOC_HC_MAIN_IRQ_MASK = 0x20024,
223 ERR_IRQ = (1 << 0), /* shift by (2 * port #) */
224 DONE_IRQ = (1 << 1), /* shift by (2 * port #) */
225 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
226 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
227 DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */
228 DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */
229 PCI_ERR = (1 << 18),
230 TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */
231 TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */
232 PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */
233 PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */
234 ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */
235 GPIO_INT = (1 << 22),
236 SELF_INT = (1 << 23),
237 TWSI_INT = (1 << 24),
238 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
239 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
240 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
241
242 /* SATAHC registers */
243 HC_CFG = 0x00,
244
245 HC_IRQ_CAUSE = 0x14,
246 DMA_IRQ = (1 << 0), /* shift by port # */
247 HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
248 DEV_IRQ = (1 << 8), /* shift by port # */
249
250 /*
251 * Per-HC (Host-Controller) interrupt coalescing feature.
252 * This is present on all chip generations.
253 *
254 * Coalescing defers the interrupt until either the IO_THRESHOLD
255 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
256 */
257 HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
258 HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,
259
260 SOC_LED_CTRL = 0x2c,
261 SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */
262 SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */
263 /* with dev activity LED */
264
265 /* Shadow block registers */
266 SHD_BLK = 0x100,
267 SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */
268
269 /* SATA registers */
270 SATA_STATUS = 0x300, /* ctrl, err regs follow status */
271 SATA_ACTIVE = 0x350,
272 FIS_IRQ_CAUSE = 0x364,
273 FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */
274
275 LTMODE = 0x30c, /* requires read-after-write */
276 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
277
278 PHY_MODE2 = 0x330,
279 PHY_MODE3 = 0x310,
280
281 PHY_MODE4 = 0x314, /* requires read-after-write */
282 PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */
283 PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */
284 PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */
285 PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */
286
287 SATA_IFCTL = 0x344,
288 SATA_TESTCTL = 0x348,
289 SATA_IFSTAT = 0x34c,
290 VENDOR_UNIQUE_FIS = 0x35c,
291
292 FISCFG = 0x360,
293 FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
294 FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
295
296 MV5_PHY_MODE = 0x74,
297 MV5_LTMODE = 0x30,
298 MV5_PHY_CTL = 0x0C,
299 SATA_IFCFG = 0x050,
300
301 MV_M2_PREAMP_MASK = 0x7e0,
302
303 /* Port registers */
304 EDMA_CFG = 0,
305 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
306 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
307 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
308 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
309 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
310 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
311 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
312
313 EDMA_ERR_IRQ_CAUSE = 0x8,
314 EDMA_ERR_IRQ_MASK = 0xc,
315 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
316 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
317 EDMA_ERR_DEV = (1 << 2), /* device error */
318 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
319 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
320 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
321 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
322 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
323 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
324 	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
325 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
326 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
327 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
328 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
329
330 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
331 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
332 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
333 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
334 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
335
336 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
337
338 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
339 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
340 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
341 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
342 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
343 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
344
345 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
346
347 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
348 EDMA_ERR_OVERRUN_5 = (1 << 5),
349 EDMA_ERR_UNDERRUN_5 = (1 << 6),
350
351 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
352 EDMA_ERR_LNK_CTRL_RX_1 |
353 EDMA_ERR_LNK_CTRL_RX_3 |
354 EDMA_ERR_LNK_CTRL_TX,
355
356 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
357 EDMA_ERR_PRD_PAR |
358 EDMA_ERR_DEV_DCON |
359 EDMA_ERR_DEV_CON |
360 EDMA_ERR_SERR |
361 EDMA_ERR_SELF_DIS |
362 EDMA_ERR_CRQB_PAR |
363 EDMA_ERR_CRPB_PAR |
364 EDMA_ERR_INTRL_PAR |
365 EDMA_ERR_IORDY |
366 EDMA_ERR_LNK_CTRL_RX_2 |
367 EDMA_ERR_LNK_DATA_RX |
368 EDMA_ERR_LNK_DATA_TX |
369 EDMA_ERR_TRANS_PROTO,
370
371 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
372 EDMA_ERR_PRD_PAR |
373 EDMA_ERR_DEV_DCON |
374 EDMA_ERR_DEV_CON |
375 EDMA_ERR_OVERRUN_5 |
376 EDMA_ERR_UNDERRUN_5 |
377 EDMA_ERR_SELF_DIS_5 |
378 EDMA_ERR_CRQB_PAR |
379 EDMA_ERR_CRPB_PAR |
380 EDMA_ERR_INTRL_PAR |
381 EDMA_ERR_IORDY,
382
383 EDMA_REQ_Q_BASE_HI = 0x10,
384 EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */
385
386 EDMA_REQ_Q_OUT_PTR = 0x18,
387 EDMA_REQ_Q_PTR_SHIFT = 5,
388
389 EDMA_RSP_Q_BASE_HI = 0x1c,
390 EDMA_RSP_Q_IN_PTR = 0x20,
391 EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */
392 EDMA_RSP_Q_PTR_SHIFT = 3,
393
394 EDMA_CMD = 0x28, /* EDMA command register */
395 EDMA_EN = (1 << 0), /* enable EDMA */
396 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
397 EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
398
399 EDMA_STATUS = 0x30, /* EDMA engine status */
400 EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
401 EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
402
403 EDMA_IORDY_TMOUT = 0x34,
404 EDMA_ARB_CFG = 0x38,
405
406 EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */
407 EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */
408
409 BMDMA_CMD = 0x224, /* bmdma command register */
410 BMDMA_STATUS = 0x228, /* bmdma status register */
411 BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */
412 BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */
413
414 /* Host private flags (hp_flags) */
415 MV_HP_FLAG_MSI = (1 << 0),
416 MV_HP_ERRATA_50XXB0 = (1 << 1),
417 MV_HP_ERRATA_50XXB2 = (1 << 2),
418 MV_HP_ERRATA_60X1B2 = (1 << 3),
419 MV_HP_ERRATA_60X1C0 = (1 << 4),
420 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
421 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
422 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
423 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
424 MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
425 MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
426 MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */
427
428 /* Port private flags (pp_flags) */
429 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
430 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
431 MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
432 MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
433 MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */
434 };
435
436 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
437 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
438 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
439 #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
440 #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
441
442 #define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
443 #define WINDOW_BASE(i) (0x20034 + ((i) << 4))
444
445 enum {
446 /* DMA boundary 0xffff is required by the s/g splitting
447 	 * we need on /length/ in mv_fill_sg().
448 */
449 MV_DMA_BOUNDARY = 0xffffU,
450
451 /* mask of register bits containing lower 32 bits
452 * of EDMA request queue DMA address
453 */
454 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
455
456 /* ditto, for response queue */
457 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
458 };
459
460 enum chip_type {
461 chip_504x,
462 chip_508x,
463 chip_5080,
464 chip_604x,
465 chip_608x,
466 chip_6042,
467 chip_7042,
468 chip_soc,
469 };
470
471 /* Command ReQuest Block: 32B */
472 struct mv_crqb {
473 __le32 sg_addr;
474 __le32 sg_addr_hi;
475 __le16 ctrl_flags;
476 __le16 ata_cmd[11];
477 };
478
479 struct mv_crqb_iie {
480 __le32 addr;
481 __le32 addr_hi;
482 __le32 flags;
483 __le32 len;
484 __le32 ata_cmd[4];
485 };
486
487 /* Command ResPonse Block: 8B */
488 struct mv_crpb {
489 __le16 id;
490 __le16 flags;
491 __le32 tmstmp;
492 };
493
494 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
495 struct mv_sg {
496 __le32 addr;
497 __le32 flags_size;
498 __le32 addr_hi;
499 __le32 reserved;
500 };
501
502 /*
503 * We keep a local cache of a few frequently accessed port
504 * registers here, to avoid having to read them (very slow)
505 * when switching between EDMA and non-EDMA modes.
506 */
507 struct mv_cached_regs {
508 u32 fiscfg;
509 u32 ltmode;
510 u32 haltcond;
511 u32 unknown_rsvd;
512 };
513
514 struct mv_port_priv {
515 struct mv_crqb *crqb;
516 dma_addr_t crqb_dma;
517 struct mv_crpb *crpb;
518 dma_addr_t crpb_dma;
519 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
520 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
521
522 unsigned int req_idx;
523 unsigned int resp_idx;
524
525 u32 pp_flags;
526 struct mv_cached_regs cached;
527 unsigned int delayed_eh_pmp_map;
528 };
529
530 struct mv_port_signal {
531 u32 amps;
532 u32 pre;
533 };
534
535 struct mv_host_priv {
536 u32 hp_flags;
537 u32 main_irq_mask;
538 struct mv_port_signal signal[8];
539 const struct mv_hw_ops *ops;
540 int n_ports;
541 void __iomem *base;
542 void __iomem *main_irq_cause_addr;
543 void __iomem *main_irq_mask_addr;
544 u32 irq_cause_offset;
545 u32 irq_mask_offset;
546 u32 unmask_all_irqs;
547 /*
548 * These consistent DMA memory pools give us guaranteed
549 * alignment for hardware-accessed data structures,
550 * and less memory waste in accomplishing the alignment.
551 */
552 struct dma_pool *crqb_pool;
553 struct dma_pool *crpb_pool;
554 struct dma_pool *sg_tbl_pool;
555 };
556
557 struct mv_hw_ops {
558 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
559 unsigned int port);
560 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
561 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
562 void __iomem *mmio);
563 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
564 unsigned int n_hc);
565 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
566 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
567 };
568
569 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
570 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
571 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
572 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
573 static int mv_port_start(struct ata_port *ap);
574 static void mv_port_stop(struct ata_port *ap);
575 static int mv_qc_defer(struct ata_queued_cmd *qc);
576 static void mv_qc_prep(struct ata_queued_cmd *qc);
577 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
578 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
579 static int mv_hardreset(struct ata_link *link, unsigned int *class,
580 unsigned long deadline);
581 static void mv_eh_freeze(struct ata_port *ap);
582 static void mv_eh_thaw(struct ata_port *ap);
583 static void mv6_dev_config(struct ata_device *dev);
584
585 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
586 unsigned int port);
587 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
588 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
589 void __iomem *mmio);
590 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
591 unsigned int n_hc);
592 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
593 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
594
595 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
596 unsigned int port);
597 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
598 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
599 void __iomem *mmio);
600 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
601 unsigned int n_hc);
602 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
603 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
604 void __iomem *mmio);
605 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
606 void __iomem *mmio);
607 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
608 void __iomem *mmio, unsigned int n_hc);
609 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
610 void __iomem *mmio);
611 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
612 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
613 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
614 unsigned int port_no);
615 static int mv_stop_edma(struct ata_port *ap);
616 static int mv_stop_edma_engine(void __iomem *port_mmio);
617 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
618
619 static void mv_pmp_select(struct ata_port *ap, int pmp);
620 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
621 unsigned long deadline);
622 static int mv_softreset(struct ata_link *link, unsigned int *class,
623 unsigned long deadline);
624 static void mv_pmp_error_handler(struct ata_port *ap);
625 static void mv_process_crpb_entries(struct ata_port *ap,
626 struct mv_port_priv *pp);
627
628 static void mv_sff_irq_clear(struct ata_port *ap);
629 static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
630 static void mv_bmdma_setup(struct ata_queued_cmd *qc);
631 static void mv_bmdma_start(struct ata_queued_cmd *qc);
632 static void mv_bmdma_stop(struct ata_queued_cmd *qc);
633 static u8 mv_bmdma_status(struct ata_port *ap);
634 static u8 mv_sff_check_status(struct ata_port *ap);
635
636 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
637 * because we have to allow room for worst case splitting of
638 * PRDs for 64K boundaries in mv_fill_sg().
639 */
640 static struct scsi_host_template mv5_sht = {
641 ATA_BASE_SHT(DRV_NAME),
642 .sg_tablesize = MV_MAX_SG_CT / 2,
643 .dma_boundary = MV_DMA_BOUNDARY,
644 };
645
646 static struct scsi_host_template mv6_sht = {
647 ATA_NCQ_SHT(DRV_NAME),
648 .can_queue = MV_MAX_Q_DEPTH - 1,
649 .sg_tablesize = MV_MAX_SG_CT / 2,
650 .dma_boundary = MV_DMA_BOUNDARY,
651 };
652
653 static struct ata_port_operations mv5_ops = {
654 .inherits = &ata_sff_port_ops,
655
656 .lost_interrupt = ATA_OP_NULL,
657
658 .qc_defer = mv_qc_defer,
659 .qc_prep = mv_qc_prep,
660 .qc_issue = mv_qc_issue,
661
662 .freeze = mv_eh_freeze,
663 .thaw = mv_eh_thaw,
664 .hardreset = mv_hardreset,
665 .error_handler = ata_std_error_handler, /* avoid SFF EH */
666 .post_internal_cmd = ATA_OP_NULL,
667
668 .scr_read = mv5_scr_read,
669 .scr_write = mv5_scr_write,
670
671 .port_start = mv_port_start,
672 .port_stop = mv_port_stop,
673 };
674
675 static struct ata_port_operations mv6_ops = {
676 .inherits = &mv5_ops,
677 .dev_config = mv6_dev_config,
678 .scr_read = mv_scr_read,
679 .scr_write = mv_scr_write,
680
681 .pmp_hardreset = mv_pmp_hardreset,
682 .pmp_softreset = mv_softreset,
683 .softreset = mv_softreset,
684 .error_handler = mv_pmp_error_handler,
685
686 .sff_check_status = mv_sff_check_status,
687 .sff_irq_clear = mv_sff_irq_clear,
688 .check_atapi_dma = mv_check_atapi_dma,
689 .bmdma_setup = mv_bmdma_setup,
690 .bmdma_start = mv_bmdma_start,
691 .bmdma_stop = mv_bmdma_stop,
692 .bmdma_status = mv_bmdma_status,
693 };
694
695 static struct ata_port_operations mv_iie_ops = {
696 .inherits = &mv6_ops,
697 .dev_config = ATA_OP_NULL,
698 .qc_prep = mv_qc_prep_iie,
699 };
700
701 static const struct ata_port_info mv_port_info[] = {
702 { /* chip_504x */
703 .flags = MV_GEN_I_FLAGS,
704 .pio_mask = ATA_PIO4,
705 .udma_mask = ATA_UDMA6,
706 .port_ops = &mv5_ops,
707 },
708 { /* chip_508x */
709 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
710 .pio_mask = ATA_PIO4,
711 .udma_mask = ATA_UDMA6,
712 .port_ops = &mv5_ops,
713 },
714 { /* chip_5080 */
715 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
716 .pio_mask = ATA_PIO4,
717 .udma_mask = ATA_UDMA6,
718 .port_ops = &mv5_ops,
719 },
720 { /* chip_604x */
721 .flags = MV_GEN_II_FLAGS,
722 .pio_mask = ATA_PIO4,
723 .udma_mask = ATA_UDMA6,
724 .port_ops = &mv6_ops,
725 },
726 { /* chip_608x */
727 .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
728 .pio_mask = ATA_PIO4,
729 .udma_mask = ATA_UDMA6,
730 .port_ops = &mv6_ops,
731 },
732 { /* chip_6042 */
733 .flags = MV_GEN_IIE_FLAGS,
734 .pio_mask = ATA_PIO4,
735 .udma_mask = ATA_UDMA6,
736 .port_ops = &mv_iie_ops,
737 },
738 { /* chip_7042 */
739 .flags = MV_GEN_IIE_FLAGS,
740 .pio_mask = ATA_PIO4,
741 .udma_mask = ATA_UDMA6,
742 .port_ops = &mv_iie_ops,
743 },
744 { /* chip_soc */
745 .flags = MV_GEN_IIE_FLAGS,
746 .pio_mask = ATA_PIO4,
747 .udma_mask = ATA_UDMA6,
748 .port_ops = &mv_iie_ops,
749 },
750 };
751
752 static const struct pci_device_id mv_pci_tbl[] = {
753 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
754 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
755 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
756 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
757 /* RocketRAID 1720/174x have different identifiers */
758 { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
759 { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
760 { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
761
762 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
763 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
764 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
765 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
766 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
767
768 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
769
770 /* Adaptec 1430SA */
771 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
772
773 /* Marvell 7042 support */
774 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
775
776 /* Highpoint RocketRAID PCIe series */
777 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
778 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
779
780 { } /* terminate list */
781 };
782
783 static const struct mv_hw_ops mv5xxx_ops = {
784 .phy_errata = mv5_phy_errata,
785 .enable_leds = mv5_enable_leds,
786 .read_preamp = mv5_read_preamp,
787 .reset_hc = mv5_reset_hc,
788 .reset_flash = mv5_reset_flash,
789 .reset_bus = mv5_reset_bus,
790 };
791
792 static const struct mv_hw_ops mv6xxx_ops = {
793 .phy_errata = mv6_phy_errata,
794 .enable_leds = mv6_enable_leds,
795 .read_preamp = mv6_read_preamp,
796 .reset_hc = mv6_reset_hc,
797 .reset_flash = mv6_reset_flash,
798 .reset_bus = mv_reset_pci_bus,
799 };
800
801 static const struct mv_hw_ops mv_soc_ops = {
802 .phy_errata = mv6_phy_errata,
803 .enable_leds = mv_soc_enable_leds,
804 .read_preamp = mv_soc_read_preamp,
805 .reset_hc = mv_soc_reset_hc,
806 .reset_flash = mv_soc_reset_flash,
807 .reset_bus = mv_soc_reset_bus,
808 };
809
810 /*
811 * Functions
812 */
813
814 static inline void writelfl(unsigned long data, void __iomem *addr)
815 {
816 writel(data, addr);
817 (void) readl(addr); /* flush to avoid PCI posted write */
818 }
819
820 static inline unsigned int mv_hc_from_port(unsigned int port)
821 {
822 return port >> MV_PORT_HC_SHIFT;
823 }
824
825 static inline unsigned int mv_hardport_from_port(unsigned int port)
826 {
827 return port & MV_PORT_MASK;
828 }
829
830 /*
831  * Consolidate some rather tricky bit shift calculations.
832  * This is hot-path stuff, and the code needs two return values,
833  * so it is a macro rather than a function or an inline.
834 *
835 * port is the sole input, in range 0..7.
836 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
837 * hardport is the other output, in range 0..3.
838 *
839 * Note that port and hardport may be the same variable in some cases.
840 */
841 #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
842 { \
843 shift = mv_hc_from_port(port) * HC_SHIFT; \
844 hardport = mv_hardport_from_port(port); \
845 shift += hardport * 2; \
846 }
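/*
 * Worked example: for port 6 (the third port of HC1),
 * mv_hc_from_port(6) == 1, so shift starts at HC_SHIFT (9);
 * hardport == (6 & 3) == 2 adds 2 * 2 more, giving shift == 13.
 * So in main_irq_cause/mask, ERR_IRQ for port 6 is bit 13 and
 * DONE_IRQ is bit 14, within HC1's bit range 9-17.
 */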
847
848 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
849 {
850 return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
851 }
852
853 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
854 unsigned int port)
855 {
856 return mv_hc_base(base, mv_hc_from_port(port));
857 }
858
859 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
860 {
861 return mv_hc_base_from_port(base, port) +
862 MV_SATAHC_ARBTR_REG_SZ +
863 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
864 }
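/*
 * Address map example: for port 5, mv_hc_base_from_port() returns
 * base + SATAHC0_REG_BASE + 1 * MV_SATAHC_REG_SZ == base + 0x30000;
 * adding MV_SATAHC_ARBTR_REG_SZ (0x2000) plus hardport 1 *
 * MV_PORT_REG_SZ (0x2000) puts port 5's registers at base + 0x34000.
 */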
865
866 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
867 {
868 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
869 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
870
871 return hc_mmio + ofs;
872 }
873
874 static inline void __iomem *mv_host_base(struct ata_host *host)
875 {
876 struct mv_host_priv *hpriv = host->private_data;
877 return hpriv->base;
878 }
879
880 static inline void __iomem *mv_ap_base(struct ata_port *ap)
881 {
882 return mv_port_base(mv_host_base(ap->host), ap->port_no);
883 }
884
885 static inline int mv_get_hc_count(unsigned long port_flags)
886 {
887 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
888 }
889
890 /**
891 * mv_save_cached_regs - (re-)initialize cached port registers
892 * @ap: the port whose registers we are caching
893 *
894 * Initialize the local cache of port registers,
895 * so that reading them over and over again can
896 * be avoided on the hotter paths of this driver.
897 * This saves a few microseconds each time we switch
898  * to/from EDMA mode to perform, e.g., a drive cache flush.
899 */
900 static void mv_save_cached_regs(struct ata_port *ap)
901 {
902 void __iomem *port_mmio = mv_ap_base(ap);
903 struct mv_port_priv *pp = ap->private_data;
904
905 pp->cached.fiscfg = readl(port_mmio + FISCFG);
906 pp->cached.ltmode = readl(port_mmio + LTMODE);
907 pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
908 pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
909 }
910
911 /**
912 * mv_write_cached_reg - write to a cached port register
913 * @addr: hardware address of the register
914 * @old: pointer to cached value of the register
915 * @new: new value for the register
916 *
917 * Write a new value to a cached register,
918 * but only if the value is different from before.
919 */
920 static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
921 {
922 if (new != *old) {
923 unsigned long laddr;
924 *old = new;
925 /*
926 * Workaround for 88SX60x1-B2 FEr SATA#13:
927 * Read-after-write is needed to prevent generating 64-bit
928 * write cycles on the PCI bus for SATA interface registers
929 * at offsets ending in 0x4 or 0xc.
930 *
931 * Looks like a lot of fuss, but it avoids an unnecessary
932 * +1 usec read-after-write delay for unaffected registers.
933 */
934 laddr = (long)addr & 0xffff;
935 if (laddr >= 0x300 && laddr <= 0x33c) {
936 laddr &= 0x000f;
937 if (laddr == 0x4 || laddr == 0xc) {
938 writelfl(new, addr); /* read after write */
939 return;
940 }
941 }
942 writel(new, addr); /* unaffected by the errata */
943 }
944 }
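/*
 * Example of the window test above: LTMODE (0x30c) lies in
 * 0x300-0x33c and ends in 0xc, so it gets the flushed write;
 * FISCFG (0x360) is outside the window and takes a plain writel().
 */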
945
946 static void mv_set_edma_ptrs(void __iomem *port_mmio,
947 struct mv_host_priv *hpriv,
948 struct mv_port_priv *pp)
949 {
950 u32 index;
951
952 /*
953 * initialize request queue
954 */
955 pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
956 index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
957
958 WARN_ON(pp->crqb_dma & 0x3ff);
959 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
960 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
961 port_mmio + EDMA_REQ_Q_IN_PTR);
962 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
963
964 /*
965 * initialize response queue
966 */
967 pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
968 index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
969
970 WARN_ON(pp->crpb_dma & 0xff);
971 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
972 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
973 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
974 port_mmio + EDMA_RSP_Q_OUT_PTR);
975 }
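/*
 * Pointer register layout, for illustration: with EDMA_REQ_Q_PTR_SHIFT
 * of 5, a req_idx of 3 yields index == 0x60, occupying bits 5-9 of
 * EDMA_REQ_Q_IN_PTR, while bits 10-31 of the same register hold the
 * low part of crqb_dma (EDMA_REQ_Q_BASE_LO_MASK == 0xfffffc00).
 */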
976
977 static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
978 {
979 /*
980 * When writing to the main_irq_mask in hardware,
981 * we must ensure exclusivity between the interrupt coalescing bits
982 * and the corresponding individual port DONE_IRQ bits.
983 *
984 * Note that this register is really an "IRQ enable" register,
985 * not an "IRQ mask" register as Marvell's naming might suggest.
986 */
987 if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
988 mask &= ~DONE_IRQ_0_3;
989 if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
990 mask &= ~DONE_IRQ_4_7;
991 writelfl(mask, hpriv->main_irq_mask_addr);
992 }
993
994 static void mv_set_main_irq_mask(struct ata_host *host,
995 u32 disable_bits, u32 enable_bits)
996 {
997 struct mv_host_priv *hpriv = host->private_data;
998 u32 old_mask, new_mask;
999
1000 old_mask = hpriv->main_irq_mask;
1001 new_mask = (old_mask & ~disable_bits) | enable_bits;
1002 if (new_mask != old_mask) {
1003 hpriv->main_irq_mask = new_mask;
1004 mv_write_main_irq_mask(new_mask, hpriv);
1005 }
1006 }
1007
1008 static void mv_enable_port_irqs(struct ata_port *ap,
1009 unsigned int port_bits)
1010 {
1011 unsigned int shift, hardport, port = ap->port_no;
1012 u32 disable_bits, enable_bits;
1013
1014 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1015
1016 disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
1017 enable_bits = port_bits << shift;
1018 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
1019 }
1020
1021 static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
1022 void __iomem *port_mmio,
1023 unsigned int port_irqs)
1024 {
1025 struct mv_host_priv *hpriv = ap->host->private_data;
1026 int hardport = mv_hardport_from_port(ap->port_no);
1027 void __iomem *hc_mmio = mv_hc_base_from_port(
1028 mv_host_base(ap->host), ap->port_no);
1029 u32 hc_irq_cause;
1030
1031 /* clear EDMA event indicators, if any */
1032 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
1033
1034 /* clear pending irq events */
1035 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
1036 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
1037
1038 /* clear FIS IRQ Cause */
1039 if (IS_GEN_IIE(hpriv))
1040 writelfl(0, port_mmio + FIS_IRQ_CAUSE);
1041
1042 mv_enable_port_irqs(ap, port_irqs);
1043 }
1044
1045 static void mv_set_irq_coalescing(struct ata_host *host,
1046 unsigned int count, unsigned int usecs)
1047 {
1048 struct mv_host_priv *hpriv = host->private_data;
1049 void __iomem *mmio = hpriv->base, *hc_mmio;
1050 u32 coal_enable = 0;
1051 unsigned long flags;
1052 unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
1053 const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
1054 ALL_PORTS_COAL_DONE;
1055
1056 /* Disable IRQ coalescing if either threshold is zero */
1057 if (!usecs || !count) {
1058 clks = count = 0;
1059 } else {
1060 /* Respect maximum limits of the hardware */
1061 clks = usecs * COAL_CLOCKS_PER_USEC;
1062 if (clks > MAX_COAL_TIME_THRESHOLD)
1063 clks = MAX_COAL_TIME_THRESHOLD;
1064 if (count > MAX_COAL_IO_COUNT)
1065 count = MAX_COAL_IO_COUNT;
1066 }
1067
1068 spin_lock_irqsave(&host->lock, flags);
1069 mv_set_main_irq_mask(host, coal_disable, 0);
1070
1071 if (is_dual_hc && !IS_GEN_I(hpriv)) {
1072 /*
1073 * GEN_II/GEN_IIE with dual host controllers:
1074 * one set of global thresholds for the entire chip.
1075 */
1076 writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
1077 writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
1078 /* clear leftover coal IRQ bit */
1079 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
1080 if (count)
1081 coal_enable = ALL_PORTS_COAL_DONE;
1082 clks = count = 0; /* force clearing of regular regs below */
1083 }
1084
1085 /*
1086 * All chips: independent thresholds for each HC on the chip.
1087 */
1088 hc_mmio = mv_hc_base_from_port(mmio, 0);
1089 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1090 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1091 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1092 if (count)
1093 coal_enable |= PORTS_0_3_COAL_DONE;
1094 if (is_dual_hc) {
1095 hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
1096 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1097 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1098 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1099 if (count)
1100 coal_enable |= PORTS_4_7_COAL_DONE;
1101 }
1102
1103 mv_set_main_irq_mask(host, 0, coal_enable);
1104 spin_unlock_irqrestore(&host->lock, flags);
1105 }
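/*
 * Worked example with illustrative numbers: usecs == 100 gives
 * clks == 100 * COAL_CLOCKS_PER_USEC == 15000, well under the 24-bit
 * MAX_COAL_TIME_THRESHOLD, while a count of 300 would be clipped to
 * MAX_COAL_IO_COUNT (255). A caller would typically pass the module
 * parameters defined near the top of this file, e.g.:
 *
 *	mv_set_irq_coalescing(host, irq_coalescing_io_count,
 *			      irq_coalescing_usecs);
 */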
1106
1107 /**
1108 * mv_start_edma - Enable eDMA engine
1109  * @ap: port on which to enable eDMA; @port_mmio: its base address
1110  * @pp: port private data; @protocol: protocol of the queued command
1111  *
1112  * Enable eDMA, first stopping and reconfiguring the engine if its
1113  * current NCQ setting does not match what @protocol requires.
1114 *
1115 * LOCKING:
1116 * Inherited from caller.
1117 */
1118 static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
1119 struct mv_port_priv *pp, u8 protocol)
1120 {
1121 int want_ncq = (protocol == ATA_PROT_NCQ);
1122
1123 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1124 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
1125 if (want_ncq != using_ncq)
1126 mv_stop_edma(ap);
1127 }
1128 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1129 struct mv_host_priv *hpriv = ap->host->private_data;
1130
1131 mv_edma_cfg(ap, want_ncq, 1);
1132
1133 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1134 mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
1135
1136 writelfl(EDMA_EN, port_mmio + EDMA_CMD);
1137 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
1138 }
1139 }
1140
1141 static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
1142 {
1143 void __iomem *port_mmio = mv_ap_base(ap);
1144 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
1145 const int per_loop = 5, timeout = (15 * 1000 / per_loop);
1146 int i;
1147
1148 /*
1149 * Wait for the EDMA engine to finish transactions in progress.
1150 * No idea what a good "timeout" value might be, but measurements
1151 * indicate that it often requires hundreds of microseconds
1152 	 * with two drives in use. So we use the 15 msec value above
1153 * as a rough guess at what even more drives might require.
1154 */
1155 for (i = 0; i < timeout; ++i) {
1156 u32 edma_stat = readl(port_mmio + EDMA_STATUS);
1157 if ((edma_stat & empty_idle) == empty_idle)
1158 break;
1159 udelay(per_loop);
1160 }
1161 /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
1162 }
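/*
 * Budget arithmetic for the loop above: timeout == 15 * 1000 / 5 ==
 * 3000 iterations of udelay(5), i.e. roughly 15 msec in the worst
 * case before we give up waiting and stop the engine anyway.
 */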
1163
1164 /**
1165 * mv_stop_edma_engine - Disable eDMA engine
1166 * @port_mmio: io base address
1167 *
1168 * LOCKING:
1169 * Inherited from caller.
1170 */
1171 static int mv_stop_edma_engine(void __iomem *port_mmio)
1172 {
1173 int i;
1174
1175 /* Disable eDMA. The disable bit auto clears. */
1176 writelfl(EDMA_DS, port_mmio + EDMA_CMD);
1177
1178 /* Wait for the chip to confirm eDMA is off. */
1179 for (i = 10000; i > 0; i--) {
1180 u32 reg = readl(port_mmio + EDMA_CMD);
1181 if (!(reg & EDMA_EN))
1182 return 0;
1183 udelay(10);
1184 }
1185 return -EIO;
1186 }
1187
1188 static int mv_stop_edma(struct ata_port *ap)
1189 {
1190 void __iomem *port_mmio = mv_ap_base(ap);
1191 struct mv_port_priv *pp = ap->private_data;
1192 int err = 0;
1193
1194 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1195 return 0;
1196 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1197 mv_wait_for_edma_empty_idle(ap);
1198 if (mv_stop_edma_engine(port_mmio)) {
1199 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
1200 err = -EIO;
1201 }
1202 mv_edma_cfg(ap, 0, 0);
1203 return err;
1204 }
1205
1206 #ifdef ATA_DEBUG
1207 static void mv_dump_mem(void __iomem *start, unsigned bytes)
1208 {
1209 int b, w;
1210 for (b = 0; b < bytes; ) {
1211 DPRINTK("%p: ", start + b);
1212 for (w = 0; b < bytes && w < 4; w++) {
1213 printk("%08x ", readl(start + b));
1214 b += sizeof(u32);
1215 }
1216 printk("\n");
1217 }
1218 }
1219 #endif
1220
1221 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
1222 {
1223 #ifdef ATA_DEBUG
1224 int b, w;
1225 u32 dw;
1226 for (b = 0; b < bytes; ) {
1227 DPRINTK("%02x: ", b);
1228 for (w = 0; b < bytes && w < 4; w++) {
1229 (void) pci_read_config_dword(pdev, b, &dw);
1230 printk("%08x ", dw);
1231 b += sizeof(u32);
1232 }
1233 printk("\n");
1234 }
1235 #endif
1236 }

1237 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
1238 struct pci_dev *pdev)
1239 {
1240 #ifdef ATA_DEBUG
1241 void __iomem *hc_base = mv_hc_base(mmio_base,
1242 port >> MV_PORT_HC_SHIFT);
1243 void __iomem *port_base;
1244 int start_port, num_ports, p, start_hc, num_hcs, hc;
1245
1246 if (0 > port) {
1247 start_hc = start_port = 0;
1248 		num_ports = 8;		/* should be benign for 4-port devs */
1249 num_hcs = 2;
1250 } else {
1251 start_hc = port >> MV_PORT_HC_SHIFT;
1252 start_port = port;
1253 num_ports = num_hcs = 1;
1254 }
1255 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1256 num_ports > 1 ? num_ports - 1 : start_port);
1257
1258 if (NULL != pdev) {
1259 DPRINTK("PCI config space regs:\n");
1260 mv_dump_pci_cfg(pdev, 0x68);
1261 }
1262 DPRINTK("PCI regs:\n");
1263 mv_dump_mem(mmio_base+0xc00, 0x3c);
1264 mv_dump_mem(mmio_base+0xd00, 0x34);
1265 mv_dump_mem(mmio_base+0xf00, 0x4);
1266 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1267 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1268 hc_base = mv_hc_base(mmio_base, hc);
1269 DPRINTK("HC regs (HC %i):\n", hc);
1270 mv_dump_mem(hc_base, 0x1c);
1271 }
1272 for (p = start_port; p < start_port + num_ports; p++) {
1273 port_base = mv_port_base(mmio_base, p);
1274 DPRINTK("EDMA regs (port %i):\n", p);
1275 mv_dump_mem(port_base, 0x54);
1276 DPRINTK("SATA regs (port %i):\n", p);
1277 mv_dump_mem(port_base+0x300, 0x60);
1278 }
1279 #endif
1280 }
1281
1282 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1283 {
1284 unsigned int ofs;
1285
1286 switch (sc_reg_in) {
1287 case SCR_STATUS:
1288 case SCR_CONTROL:
1289 case SCR_ERROR:
1290 ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
1291 break;
1292 case SCR_ACTIVE:
1293 ofs = SATA_ACTIVE; /* active is not with the others */
1294 break;
1295 default:
1296 ofs = 0xffffffffU;
1297 break;
1298 }
1299 return ofs;
1300 }
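/*
 * The arithmetic above assumes the libata SCR numbering
 * (SCR_STATUS == 0, SCR_ERROR == 1, SCR_CONTROL == 2) matches the
 * hardware layout: 0x300 SStatus, 0x304 SError, 0x308 SControl.
 * Only SCR_ACTIVE (0x350) breaks the pattern, hence its special case.
 */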
1301
1302 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1303 {
1304 unsigned int ofs = mv_scr_offset(sc_reg_in);
1305
1306 if (ofs != 0xffffffffU) {
1307 *val = readl(mv_ap_base(link->ap) + ofs);
1308 return 0;
1309 } else
1310 return -EINVAL;
1311 }
1312
1313 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1314 {
1315 unsigned int ofs = mv_scr_offset(sc_reg_in);
1316
1317 if (ofs != 0xffffffffU) {
1318 void __iomem *addr = mv_ap_base(link->ap) + ofs;
1319 if (sc_reg_in == SCR_CONTROL) {
1320 /*
1321 * Workaround for 88SX60x1 FEr SATA#26:
1322 *
1323 			 * COMRESETs have to take care not to accidentally
1324 			 * put the drive to sleep when writing SCR_CONTROL.
1325 			 * Setting bits 12..15 prevents this problem.
1326 			 *
1327 			 * So if we see an outbound COMRESET, set those bits.
1328 			 * Ditto for the follow-up write that clears the reset.
1329 *
1330 * The proprietary driver does this for
1331 * all chip versions, and so do we.
1332 */
1333 if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
1334 val |= 0xf000;
1335 }
1336 writelfl(val, addr);
1337 return 0;
1338 } else
1339 return -EINVAL;
1340 }
1341
1342 static void mv6_dev_config(struct ata_device *adev)
1343 {
1344 /*
1345 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1346 *
1347 * Gen-II does not support NCQ over a port multiplier
1348 * (no FIS-based switching).
1349 */
1350 if (adev->flags & ATA_DFLAG_NCQ) {
1351 if (sata_pmp_attached(adev->link->ap)) {
1352 adev->flags &= ~ATA_DFLAG_NCQ;
1353 ata_dev_printk(adev, KERN_INFO,
1354 "NCQ disabled for command-based switching\n");
1355 }
1356 }
1357 }
1358
1359 static int mv_qc_defer(struct ata_queued_cmd *qc)
1360 {
1361 struct ata_link *link = qc->dev->link;
1362 struct ata_port *ap = link->ap;
1363 struct mv_port_priv *pp = ap->private_data;
1364
1365 /*
1366 * Don't allow new commands if we're in a delayed EH state
1367 * for NCQ and/or FIS-based switching.
1368 */
1369 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1370 return ATA_DEFER_PORT;
1371 /*
1372 * If the port is completely idle, then allow the new qc.
1373 */
1374 if (ap->nr_active_links == 0)
1375 return 0;
1376
1377 /*
1378 * The port is operating in host queuing mode (EDMA) with NCQ
1379 * enabled, allow multiple NCQ commands. EDMA also allows
1380 * queueing multiple DMA commands but libata core currently
1381 * doesn't allow it.
1382 */
1383 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1384 (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
1385 return 0;
1386
1387 return ATA_DEFER_PORT;
1388 }
1389
1390 static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
1391 {
1392 struct mv_port_priv *pp = ap->private_data;
1393 void __iomem *port_mmio;
1394
1395 u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
1396 u32 ltmode, *old_ltmode = &pp->cached.ltmode;
1397 u32 haltcond, *old_haltcond = &pp->cached.haltcond;
1398
1399 ltmode = *old_ltmode & ~LTMODE_BIT8;
1400 haltcond = *old_haltcond | EDMA_ERR_DEV;
1401
1402 if (want_fbs) {
1403 fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1404 ltmode = *old_ltmode | LTMODE_BIT8;
1405 if (want_ncq)
1406 haltcond &= ~EDMA_ERR_DEV;
1407 else
1408 fiscfg |= FISCFG_WAIT_DEV_ERR;
1409 } else {
1410 fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1411 }
1412
1413 port_mmio = mv_ap_base(ap);
1414 mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1415 mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1416 mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
1417 }
1418
1419 static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1420 {
1421 struct mv_host_priv *hpriv = ap->host->private_data;
1422 u32 old, new;
1423
1424 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1425 old = readl(hpriv->base + GPIO_PORT_CTL);
1426 if (want_ncq)
1427 new = old | (1 << 22);
1428 else
1429 new = old & ~(1 << 22);
1430 if (new != old)
1431 writel(new, hpriv->base + GPIO_PORT_CTL);
1432 }
1433
1434 /**
1435  *	mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
1436  *	@ap: Port being initialized; @enable_bmdma: nonzero to allow basic DMA
1437 *
1438 * There are two DMA modes on these chips: basic DMA, and EDMA.
1439 *
1440 * Bit-0 of the "EDMA RESERVED" register enables/disables use
1441 * of basic DMA on the GEN_IIE versions of the chips.
1442 *
1443 * This bit survives EDMA resets, and must be set for basic DMA
1444 * to function, and should be cleared when EDMA is active.
1445 */
1446 static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1447 {
1448 struct mv_port_priv *pp = ap->private_data;
1449 u32 new, *old = &pp->cached.unknown_rsvd;
1450
1451 if (enable_bmdma)
1452 new = *old | 1;
1453 else
1454 new = *old & ~1;
1455 mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
1456 }
1457
1458 /*
1459 * SOC chips have an issue whereby the HDD LEDs don't always blink
1460 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1461 * of the SOC takes care of it, generating a steady blink rate when
1462 * any drive on the chip is active.
1463 *
1464 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1465 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1466 *
1467 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1468 * LED operation works then, and provides better (more accurate) feedback.
1469 *
1470 * Note that this code assumes that an SOC never has more than one HC onboard.
1471 */
1472 static void mv_soc_led_blink_enable(struct ata_port *ap)
1473 {
1474 struct ata_host *host = ap->host;
1475 struct mv_host_priv *hpriv = host->private_data;
1476 void __iomem *hc_mmio;
1477 u32 led_ctrl;
1478
1479 if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1480 return;
1481 hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1482 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1483 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1484 writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1485 }
1486
1487 static void mv_soc_led_blink_disable(struct ata_port *ap)
1488 {
1489 struct ata_host *host = ap->host;
1490 struct mv_host_priv *hpriv = host->private_data;
1491 void __iomem *hc_mmio;
1492 u32 led_ctrl;
1493 unsigned int port;
1494
1495 if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1496 return;
1497
1498 /* disable led-blink only if no ports are using NCQ */
1499 for (port = 0; port < hpriv->n_ports; port++) {
1500 struct ata_port *this_ap = host->ports[port];
1501 struct mv_port_priv *pp = this_ap->private_data;
1502
1503 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1504 return;
1505 }
1506
1507 hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1508 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1509 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1510 writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1511 }
1512
1513 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
1514 {
1515 u32 cfg;
1516 struct mv_port_priv *pp = ap->private_data;
1517 struct mv_host_priv *hpriv = ap->host->private_data;
1518 void __iomem *port_mmio = mv_ap_base(ap);
1519
1520 /* set up non-NCQ EDMA configuration */
1521 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1522 pp->pp_flags &=
1523 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
1524
1525 if (IS_GEN_I(hpriv))
1526 cfg |= (1 << 8); /* enab config burst size mask */
1527
1528 else if (IS_GEN_II(hpriv)) {
1529 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1530 mv_60x1_errata_sata25(ap, want_ncq);
1531
1532 } else if (IS_GEN_IIE(hpriv)) {
1533 int want_fbs = sata_pmp_attached(ap);
1534 /*
1535 * Possible future enhancement:
1536 *
1537 * The chip can use FBS with non-NCQ, if we allow it,
1538 		 * but first we need to have the error handling in place
1539 * for this mode (datasheet section 7.3.15.4.2.3).
1540 * So disallow non-NCQ FBS for now.
1541 */
1542 want_fbs &= want_ncq;
1543
1544 mv_config_fbs(ap, want_ncq, want_fbs);
1545
1546 if (want_fbs) {
1547 pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1548 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1549 }
1550
1551 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1552 if (want_edma) {
1553 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1554 if (!IS_SOC(hpriv))
1555 cfg |= (1 << 18); /* enab early completion */
1556 }
1557 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1558 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1559 mv_bmdma_enable_iie(ap, !want_edma);
1560
1561 if (IS_SOC(hpriv)) {
1562 if (want_ncq)
1563 mv_soc_led_blink_enable(ap);
1564 else
1565 mv_soc_led_blink_disable(ap);
1566 }
1567 }
1568
1569 if (want_ncq) {
1570 cfg |= EDMA_CFG_NCQ;
1571 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1572 }
1573
1574 writelfl(cfg, port_mmio + EDMA_CFG);
1575 }
1576
1577 static void mv_port_free_dma_mem(struct ata_port *ap)
1578 {
1579 struct mv_host_priv *hpriv = ap->host->private_data;
1580 struct mv_port_priv *pp = ap->private_data;
1581 int tag;
1582
1583 if (pp->crqb) {
1584 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1585 pp->crqb = NULL;
1586 }
1587 if (pp->crpb) {
1588 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1589 pp->crpb = NULL;
1590 }
1591 /*
1592 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1593 * For later hardware, we have one unique sg_tbl per NCQ tag.
1594 */
1595 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1596 if (pp->sg_tbl[tag]) {
1597 if (tag == 0 || !IS_GEN_I(hpriv))
1598 dma_pool_free(hpriv->sg_tbl_pool,
1599 pp->sg_tbl[tag],
1600 pp->sg_tbl_dma[tag]);
1601 pp->sg_tbl[tag] = NULL;
1602 }
1603 }
1604 }
1605
1606 /**
1607 * mv_port_start - Port specific init/start routine.
1608 * @ap: ATA channel to manipulate
1609 *
1610 * Allocate and point to DMA memory, init port private memory,
1611 * zero indices.
1612 *
1613 * LOCKING:
1614 * Inherited from caller.
1615 */
1616 static int mv_port_start(struct ata_port *ap)
1617 {
1618 struct device *dev = ap->host->dev;
1619 struct mv_host_priv *hpriv = ap->host->private_data;
1620 struct mv_port_priv *pp;
1621 unsigned long flags;
1622 int tag;
1623
1624 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1625 if (!pp)
1626 return -ENOMEM;
1627 ap->private_data = pp;
1628
1629 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1630 if (!pp->crqb)
1631 return -ENOMEM;
1632 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1633
1634 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1635 if (!pp->crpb)
1636 goto out_port_free_dma_mem;
1637 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1638
1639 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1640 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1641 ap->flags |= ATA_FLAG_AN;
1642 /*
1643 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1644 * For later hardware, we need one unique sg_tbl per NCQ tag.
1645 */
1646 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1647 if (tag == 0 || !IS_GEN_I(hpriv)) {
1648 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1649 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1650 if (!pp->sg_tbl[tag])
1651 goto out_port_free_dma_mem;
1652 } else {
1653 pp->sg_tbl[tag] = pp->sg_tbl[0];
1654 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1655 }
1656 }
1657
1658 spin_lock_irqsave(ap->lock, flags);
1659 mv_save_cached_regs(ap);
1660 mv_edma_cfg(ap, 0, 0);
1661 spin_unlock_irqrestore(ap->lock, flags);
1662
1663 return 0;
1664
1665 out_port_free_dma_mem:
1666 mv_port_free_dma_mem(ap);
1667 return -ENOMEM;
1668 }
1669
1670 /**
1671 * mv_port_stop - Port specific cleanup/stop routine.
1672 * @ap: ATA channel to manipulate
1673 *
1674 * Stop DMA, cleanup port memory.
1675 *
1676 * LOCKING:
1677 * This routine uses the host lock to protect the DMA stop.
1678 */
1679 static void mv_port_stop(struct ata_port *ap)
1680 {
1681 unsigned long flags;
1682
1683 spin_lock_irqsave(ap->lock, flags);
1684 mv_stop_edma(ap);
1685 mv_enable_port_irqs(ap, 0);
1686 spin_unlock_irqrestore(ap->lock, flags);
1687 mv_port_free_dma_mem(ap);
1688 }
1689
1690 /**
1691 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1692 * @qc: queued command whose SG list to source from
1693 *
1694 * Populate the SG list and mark the last entry.
1695 *
1696 * LOCKING:
1697 * Inherited from caller.
1698 */
1699 static void mv_fill_sg(struct ata_queued_cmd *qc)
1700 {
1701 struct mv_port_priv *pp = qc->ap->private_data;
1702 struct scatterlist *sg;
1703 struct mv_sg *mv_sg, *last_sg = NULL;
1704 unsigned int si;
1705
1706 mv_sg = pp->sg_tbl[qc->tag];
1707 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1708 dma_addr_t addr = sg_dma_address(sg);
1709 u32 sg_len = sg_dma_len(sg);
1710
1711 while (sg_len) {
1712 u32 offset = addr & 0xffff;
1713 u32 len = sg_len;
1714
1715 if (offset + len > 0x10000)
1716 len = 0x10000 - offset;
1717
1718 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1719 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1720 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1721 mv_sg->reserved = 0;
1722
1723 sg_len -= len;
1724 addr += len;
1725
1726 last_sg = mv_sg;
1727 mv_sg++;
1728 }
1729 }
1730
1731 if (likely(last_sg))
1732 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1733 mb(); /* ensure data structure is visible to the chipset */
1734 }
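/*
 * Worked example of the splitting above: a segment at addr 0x1ff00
 * with sg_len 0x300 crosses a 64KB boundary (0xff00 + 0x300 > 0x10000),
 * so it is emitted as two ePRDs: [0x1ff00, len 0x100] followed by
 * [0x20000, len 0x200].
 */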
1735
1736 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1737 {
1738 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1739 (last ? CRQB_CMD_LAST : 0);
1740 *cmdw = cpu_to_le16(tmp);
1741 }
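/*
 * Packing example (hypothetical operands): mv_crqb_pack_cmd(cmdw,
 * 0x08, 0x2, 0) produces 0x08 | (0x2 << 8) | CRQB_CMD_CS == 0x1208;
 * passing last != 0 would also set bit 15 (CRQB_CMD_LAST) to mark
 * the final word of the command sequence.
 */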
1742
1743 /**
1744 * mv_sff_irq_clear - Clear hardware interrupt after DMA.
1745 * @ap: Port associated with this ATA transaction.
1746 *
1747 * We need this only for ATAPI bmdma transactions,
1748 * as otherwise we experience spurious interrupts
1749 * after libata-sff handles the bmdma interrupts.
1750 */
1751 static void mv_sff_irq_clear(struct ata_port *ap)
1752 {
1753 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1754 }
1755
1756 /**
1757 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1758 * @qc: queued command to check for chipset/DMA compatibility.
1759 *
1760 * The bmdma engines cannot handle speculative data sizes
1761 * (bytecount under/over flow). So only allow DMA for
1762 * data transfer commands with known data sizes.
1763 *
1764 * LOCKING:
1765 * Inherited from caller.
1766 */
1767 static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1768 {
1769 struct scsi_cmnd *scmd = qc->scsicmd;
1770
1771 if (scmd) {
1772 switch (scmd->cmnd[0]) {
1773 case READ_6:
1774 case READ_10:
1775 case READ_12:
1776 case WRITE_6:
1777 case WRITE_10:
1778 case WRITE_12:
1779 case GPCMD_READ_CD:
1780 case GPCMD_SEND_DVD_STRUCTURE:
1781 case GPCMD_SEND_CUE_SHEET:
1782 return 0; /* DMA is safe */
1783 }
1784 }
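/*
 * For instance, INQUIRY or REQUEST SENSE may return fewer bytes than
 * requested (bytecount underflow), which these bmdma engines cannot
 * tolerate; such commands fall back to PIO below.
 */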
1785 return -EOPNOTSUPP; /* use PIO instead */
1786 }
1787
1788 /**
1789 * mv_bmdma_setup - Set up BMDMA transaction
1790 * @qc: queued command to prepare DMA for.
1791 *
1792 * LOCKING:
1793 * Inherited from caller.
1794 */
1795 static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1796 {
1797 struct ata_port *ap = qc->ap;
1798 void __iomem *port_mmio = mv_ap_base(ap);
1799 struct mv_port_priv *pp = ap->private_data;
1800
1801 mv_fill_sg(qc);
1802
1803 /* clear all DMA cmd bits */
1804 writel(0, port_mmio + BMDMA_CMD);
1805
1806 /* load PRD table addr. */
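/* high word split in two 16-bit shifts, as in mv_fill_sg(), for 32-bit dma_addr_t */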
1807 writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
1808 port_mmio + BMDMA_PRD_HIGH);
1809 writelfl(pp->sg_tbl_dma[qc->tag],
1810 port_mmio + BMDMA_PRD_LOW);
1811
1812 /* issue r/w command */
1813 ap->ops->sff_exec_command(ap, &qc->tf);
1814 }
1815
1816 /**
1817 * mv_bmdma_start - Start a BMDMA transaction
1818 * @qc: queued command to start DMA on.
1819 *
1820 * LOCKING:
1821 * Inherited from caller.
1822 */
1823 static void mv_bmdma_start(struct ata_queued_cmd *qc)
1824 {
1825 struct ata_port *ap = qc->ap;
1826 void __iomem *port_mmio = mv_ap_base(ap);
1827 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1828 u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1829
1830 /* start host DMA transaction */
1831 writelfl(cmd, port_mmio + BMDMA_CMD);
1832 }
1833
1834 /**
1835 * mv_bmdma_stop - Stop BMDMA transfer
1836 * @qc: queued command to stop DMA on.
1837 *
1838 * Clears the ATA_DMA_START flag in the bmdma control register
1839 *
1840 * LOCKING:
1841 * Inherited from caller.
1842 */
1843 static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1844 {
1845 struct ata_port *ap = qc->ap;
1846 void __iomem *port_mmio = mv_ap_base(ap);
1847 u32 cmd;
1848
1849 /* clear start/stop bit */
1850 cmd = readl(port_mmio + BMDMA_CMD);
1851 cmd &= ~ATA_DMA_START;
1852 writelfl(cmd, port_mmio + BMDMA_CMD);
1853
1854 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1855 ata_sff_dma_pause(ap);
1856 }
1857
1858 /**
1859 * mv_bmdma_status - Read BMDMA status
1860 * @ap: port for which to retrieve DMA status.
1861 *
1862 * Read and return equivalent of the sff BMDMA status register.
1863 *
1864 * LOCKING:
1865 * Inherited from caller.
1866 */
1867 static u8 mv_bmdma_status(struct ata_port *ap)
1868 {
1869 void __iomem *port_mmio = mv_ap_base(ap);
1870 u32 reg, status;
1871
1872 /*
1873 * Other bits are valid only if ATA_DMA_ACTIVE==0,
1874 * and the ATA_DMA_INTR bit doesn't exist.
1875 */
1876 reg = readl(port_mmio + BMDMA_STATUS);
1877 if (reg & ATA_DMA_ACTIVE)
1878 status = ATA_DMA_ACTIVE;
1879 else
1880 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1881 return status;
1882 }
1883
1884 /**
1885 * mv_qc_prep - Host specific command preparation.
1886 * @qc: queued command to prepare
1887 *
1888 * This routine simply redirects to the general purpose routine
1889 * if command is not DMA. Else, it handles prep of the CRQB
1890 * (command request block), does some sanity checking, and calls
1891 * the SG load routine.
1892 *
1893 * LOCKING:
1894 * Inherited from caller.
1895 */
1896 static void mv_qc_prep(struct ata_queued_cmd *qc)
1897 {
1898 struct ata_port *ap = qc->ap;
1899 struct mv_port_priv *pp = ap->private_data;
1900 __le16 *cw;
1901 struct ata_taskfile *tf;
1902 u16 flags = 0;
1903 unsigned in_index;
1904
1905 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1906 (qc->tf.protocol != ATA_PROT_NCQ))
1907 return;
1908
1909 /* Fill in command request block
1910 */
1911 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1912 flags |= CRQB_FLAG_READ;
1913 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1914 flags |= qc->tag << CRQB_TAG_SHIFT;
1915 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1916
1917 /* get current queue index from software */
1918 in_index = pp->req_idx;
1919
1920 pp->crqb[in_index].sg_addr =
1921 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1922 pp->crqb[in_index].sg_addr_hi =
1923 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1924 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1925
1926 cw = &pp->crqb[in_index].ata_cmd[0];
1927 tf = &qc->tf;
1928
1929 /* Sadly, the CRQB cannot accommodate all registers--there are
1930 * only 11 bytes...so we must pick and choose required
1931 * registers based on the command. So, we drop feature and
1932 * hob_feature for [RW] DMA commands, but they are needed for
1933 * NCQ. NCQ will drop hob_nsect, which is not needed there
1934 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
1935 */
1936 switch (tf->command) {
1937 case ATA_CMD_READ:
1938 case ATA_CMD_READ_EXT:
1939 case ATA_CMD_WRITE:
1940 case ATA_CMD_WRITE_EXT:
1941 case ATA_CMD_WRITE_FUA_EXT:
1942 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1943 break;
1944 case ATA_CMD_FPDMA_READ:
1945 case ATA_CMD_FPDMA_WRITE:
1946 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1947 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1948 break;
1949 default:
1950 /* The only other commands EDMA supports in non-queued and
1951 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1952 * of which are defined/used by Linux. If we get here, this
1953 * driver needs work.
1954 *
1955 * FIXME: modify libata to give qc_prep a return value and
1956 * return error here.
1957 */
1958 BUG_ON(tf->command);
1959 break;
1960 }
1961 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1962 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1963 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1964 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1965 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1966 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1967 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1968 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1969 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1970
1971 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1972 return;
1973 mv_fill_sg(qc);
1974 }
1975
1976 /**
1977 * mv_qc_prep_iie - Host specific command preparation.
1978 * @qc: queued command to prepare
1979 *
1980 * This routine simply redirects to the general purpose routine
1981 * if command is not DMA. Else, it handles prep of the CRQB
1982 * (command request block), does some sanity checking, and calls
1983 * the SG load routine.
1984 *
1985 * LOCKING:
1986 * Inherited from caller.
1987 */
1988 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1989 {
1990 struct ata_port *ap = qc->ap;
1991 struct mv_port_priv *pp = ap->private_data;
1992 struct mv_crqb_iie *crqb;
1993 struct ata_taskfile *tf;
1994 unsigned in_index;
1995 u32 flags = 0;
1996
1997 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1998 (qc->tf.protocol != ATA_PROT_NCQ))
1999 return;
2000
2001 /* Fill in Gen IIE command request block */
2002 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
2003 flags |= CRQB_FLAG_READ;
2004
2005 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2006 flags |= qc->tag << CRQB_TAG_SHIFT;
2007 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
2008 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2009
2010 /* get current queue index from software */
2011 in_index = pp->req_idx;
2012
2013 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
2014 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2015 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2016 crqb->flags = cpu_to_le32(flags);
2017
2018 tf = &qc->tf;
2019 crqb->ata_cmd[0] = cpu_to_le32(
2020 (tf->command << 16) |
2021 (tf->feature << 24)
2022 );
2023 crqb->ata_cmd[1] = cpu_to_le32(
2024 (tf->lbal << 0) |
2025 (tf->lbam << 8) |
2026 (tf->lbah << 16) |
2027 (tf->device << 24)
2028 );
2029 crqb->ata_cmd[2] = cpu_to_le32(
2030 (tf->hob_lbal << 0) |
2031 (tf->hob_lbam << 8) |
2032 (tf->hob_lbah << 16) |
2033 (tf->hob_feature << 24)
2034 );
2035 crqb->ata_cmd[3] = cpu_to_le32(
2036 (tf->nsect << 0) |
2037 (tf->hob_nsect << 8)
2038 );
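/*
 * Layout of the register image packed above (as derived from the
 * assignments, not quoted from the datasheet):
 *	ata_cmd[0]: command in bits 23:16, feature in bits 31:24
 *	ata_cmd[1]: lbal/lbam/lbah/device in bytes 0..3
 *	ata_cmd[2]: hob_lbal/hob_lbam/hob_lbah/hob_feature in bytes 0..3
 *	ata_cmd[3]: nsect in byte 0, hob_nsect in byte 1
 */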
2039
2040 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2041 return;
2042 mv_fill_sg(qc);
2043 }
2044
2045 /**
2046 * mv_sff_check_status - fetch device status, if valid
2047 * @ap: ATA port to fetch status from
2048 *
2049 * When using command issue via mv_qc_issue_fis(),
2050 * the initial ATA_BUSY state does not show up in the
2051 * ATA status (shadow) register. This can confuse libata!
2052 *
2053 * So we have a hook here to fake ATA_BUSY for that situation,
2054 * until the first time a BUSY, DRQ, or ERR bit is seen.
2055 *
2056 * The rest of the time, it simply returns the ATA status register.
2057 */
2058 static u8 mv_sff_check_status(struct ata_port *ap)
2059 {
2060 u8 stat = ioread8(ap->ioaddr.status_addr);
2061 struct mv_port_priv *pp = ap->private_data;
2062
2063 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2064 if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2065 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2066 else
2067 stat = ATA_BUSY;
2068 }
2069 return stat;
2070 }
2071
2072 /**
2073 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2074 * @fis: fis to be sent
2075 * @nwords: number of 32-bit words in the fis
2076 */
2077 static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2078 {
2079 void __iomem *port_mmio = mv_ap_base(ap);
2080 u32 ifctl, old_ifctl, ifstat;
2081 int i, timeout = 200, final_word = nwords - 1;
2082
2083 /* Initiate FIS transmission mode */
2084 old_ifctl = readl(port_mmio + SATA_IFCTL);
2085 ifctl = 0x100 | (old_ifctl & 0xf);
2086 writelfl(ifctl, port_mmio + SATA_IFCTL);
2087
2088 /* Send all words of the FIS except for the final word */
2089 for (i = 0; i < final_word; ++i)
2090 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
2091
2092 /* Flag end-of-transmission, and then send the final word */
2093 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2094 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2095
2096 /*
2097 * Wait for FIS transmission to complete.
2098 * This typically takes just a single iteration.
2099 */
2100 do {
2101 ifstat = readl(port_mmio + SATA_IFSTAT);
2102 } while (!(ifstat & 0x1000) && --timeout);
2103
2104 /* Restore original port configuration */
2105 writelfl(old_ifctl, port_mmio + SATA_IFCTL);
2106
2107 /* See if it worked */
2108 if ((ifstat & 0x3000) != 0x1000) {
2109 ata_port_printk(ap, KERN_WARNING,
2110 "%s transmission error, ifstat=%08x\n",
2111 __func__, ifstat);
2112 return AC_ERR_OTHER;
2113 }
2114 return 0;
2115 }
2116
2117 /**
2118 * mv_qc_issue_fis - Issue a command directly as a FIS
2119 * @qc: queued command to start
2120 *
2121 * Note that the ATA shadow registers are not updated
2122 * after command issue, so the device will appear "READY"
2123 * if polled, even while it is BUSY processing the command.
2124 *
2125 * So we use a status hook to fake ATA_BUSY until the drive changes state.
2126 *
2127 * Note: we don't get updated shadow regs on *completion*
2128 * of non-data commands. So avoid sending them via this function,
2129 * as they will appear to have completed immediately.
2130 *
2131 * GEN_IIE has special registers that we could get the result tf from,
2132 * but earlier chipsets do not. For now, we ignore those registers.
2133 */
2134 static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2135 {
2136 struct ata_port *ap = qc->ap;
2137 struct mv_port_priv *pp = ap->private_data;
2138 struct ata_link *link = qc->dev->link;
2139 u32 fis[5];
2140 int err = 0;
2141
2142 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2143 err = mv_send_fis(ap, fis, sizeof(fis) / sizeof(fis[0]));
2144 if (err)
2145 return err;
2146
2147 switch (qc->tf.protocol) {
2148 case ATAPI_PROT_PIO:
2149 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2150 /* fall through */
2151 case ATAPI_PROT_NODATA:
2152 ap->hsm_task_state = HSM_ST_FIRST;
2153 break;
2154 case ATA_PROT_PIO:
2155 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2156 if (qc->tf.flags & ATA_TFLAG_WRITE)
2157 ap->hsm_task_state = HSM_ST_FIRST;
2158 else
2159 ap->hsm_task_state = HSM_ST;
2160 break;
2161 default:
2162 ap->hsm_task_state = HSM_ST_LAST;
2163 break;
2164 }
2165
2166 if (qc->tf.flags & ATA_TFLAG_POLLING)
2167 ata_pio_queue_task(ap, qc, 0);
2168 return 0;
2169 }
2170
2171 /**
2172 * mv_qc_issue - Initiate a command to the host
2173 * @qc: queued command to start
2174 *
2175 * This routine simply redirects to the general purpose routine
2176 * if command is not DMA. Else, it sanity checks our local
2177 * caches of the request producer/consumer indices then enables
2178 * DMA and bumps the request producer index.
2179 *
2180 * LOCKING:
2181 * Inherited from caller.
2182 */
2183 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2184 {
2185 static int limit_warnings = 10;
2186 struct ata_port *ap = qc->ap;
2187 void __iomem *port_mmio = mv_ap_base(ap);
2188 struct mv_port_priv *pp = ap->private_data;
2189 u32 in_index;
2190 unsigned int port_irqs;
2191
2192 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2193
2194 switch (qc->tf.protocol) {
2195 case ATA_PROT_DMA:
2196 case ATA_PROT_NCQ:
2197 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2198 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2199 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2200
2201 /* Write the request in pointer to kick the EDMA to life */
2202 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
2203 port_mmio + EDMA_REQ_Q_IN_PTR);
2204 return 0;
2205
2206 case ATA_PROT_PIO:
2207 /*
2208 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2209 *
2210 * Someday, we might implement special polling workarounds
2211 * for these, but it all seems rather unnecessary since we
2212 * normally use only DMA for commands which transfer more
2213 * than a single block of data.
2214 *
2215 * Much of the time, this could just work regardless.
2216 * So for now, just log the incident, and allow the attempt.
2217 */
2218 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
2219 --limit_warnings;
2220 ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
2221 ": attempting PIO w/multiple DRQ: "
2222 "this may fail due to h/w errata\n");
2223 }
2224 /* fall through */
2225 case ATA_PROT_NODATA:
2226 case ATAPI_PROT_PIO:
2227 case ATAPI_PROT_NODATA:
2228 if (ap->flags & ATA_FLAG_PIO_POLLING)
2229 qc->tf.flags |= ATA_TFLAG_POLLING;
2230 break;
2231 }
2232
2233 if (qc->tf.flags & ATA_TFLAG_POLLING)
2234 port_irqs = ERR_IRQ; /* mask device interrupt when polling */
2235 else
2236 port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
2237
2238 /*
2239 * We're about to send a non-EDMA capable command to the
2240 * port. Turn off EDMA so there won't be problems accessing
2241 * shadow block, etc registers.
2242 */
2243 mv_stop_edma(ap);
2244 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2245 mv_pmp_select(ap, qc->dev->link->pmp);
2246
2247 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2248 struct mv_host_priv *hpriv = ap->host->private_data;
2249 /*
2250 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
2251 *
2252 * After any NCQ error, the READ_LOG_EXT command
2253 * from libata-eh *must* use mv_qc_issue_fis().
2254 * Otherwise it might fail, due to chip errata.
2255 *
2256 * Rather than special-case it, we'll just *always*
2257 * use this method here for READ_LOG_EXT, making for
2258 * easier testing.
2259 */
2260 if (IS_GEN_II(hpriv))
2261 return mv_qc_issue_fis(qc);
2262 }
2263 return ata_sff_qc_issue(qc);
2264 }
2265
2266 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2267 {
2268 struct mv_port_priv *pp = ap->private_data;
2269 struct ata_queued_cmd *qc;
2270
2271 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2272 return NULL;
2273 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2274 if (qc) {
2275 if (qc->tf.flags & ATA_TFLAG_POLLING)
2276 qc = NULL;
2277 else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
2278 qc = NULL;
2279 }
2280 return qc;
2281 }
2282
2283 static void mv_pmp_error_handler(struct ata_port *ap)
2284 {
2285 unsigned int pmp, pmp_map;
2286 struct mv_port_priv *pp = ap->private_data;
2287
2288 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2289 /*
2290 * Perform NCQ error analysis on failed PMPs
2291 * before we freeze the port entirely.
2292 *
2293 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2294 */
2295 pmp_map = pp->delayed_eh_pmp_map;
2296 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2297 for (pmp = 0; pmp_map != 0; pmp++) {
2298 unsigned int this_pmp = (1 << pmp);
2299 if (pmp_map & this_pmp) {
2300 struct ata_link *link = &ap->pmp_link[pmp];
2301 pmp_map &= ~this_pmp;
2302 ata_eh_analyze_ncq_error(link);
2303 }
2304 }
2305 ata_port_freeze(ap);
2306 }
2307 sata_pmp_error_handler(ap);
2308 }
2309
2310 static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2311 {
2312 void __iomem *port_mmio = mv_ap_base(ap);
2313
2314 return readl(port_mmio + SATA_TESTCTL) >> 16;
2315 }
2316
2317 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2318 {
2319 struct ata_eh_info *ehi;
2320 unsigned int pmp;
2321
2322 /*
2323 * Initialize EH info for PMPs which saw device errors
2324 */
2325 ehi = &ap->link.eh_info;
2326 for (pmp = 0; pmp_map != 0; pmp++) {
2327 unsigned int this_pmp = (1 << pmp);
2328 if (pmp_map & this_pmp) {
2329 struct ata_link *link = &ap->pmp_link[pmp];
2330
2331 pmp_map &= ~this_pmp;
2332 ehi = &link->eh_info;
2333 ata_ehi_clear_desc(ehi);
2334 ata_ehi_push_desc(ehi, "dev err");
2335 ehi->err_mask |= AC_ERR_DEV;
2336 ehi->action |= ATA_EH_RESET;
2337 ata_link_abort(link);
2338 }
2339 }
2340 }
2341
2342 static int mv_req_q_empty(struct ata_port *ap)
2343 {
2344 void __iomem *port_mmio = mv_ap_base(ap);
2345 u32 in_ptr, out_ptr;
2346
2347 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2348 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2349 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2350 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2351 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
2352 }
2353
2354 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2355 {
2356 struct mv_port_priv *pp = ap->private_data;
2357 int failed_links;
2358 unsigned int old_map, new_map;
2359
2360 /*
2361 * Device error during FBS+NCQ operation:
2362 *
2363 * Set a port flag to prevent further I/O being enqueued.
2364 * Leave the EDMA running to drain outstanding commands from this port.
2365 * Perform the post-mortem/EH only when all responses are complete.
2366 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2367 */
2368 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2369 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2370 pp->delayed_eh_pmp_map = 0;
2371 }
2372 old_map = pp->delayed_eh_pmp_map;
2373 new_map = old_map | mv_get_err_pmp_map(ap);
2374
2375 if (old_map != new_map) {
2376 pp->delayed_eh_pmp_map = new_map;
2377 mv_pmp_eh_prep(ap, new_map & ~old_map);
2378 }
2379 failed_links = hweight16(new_map);
2380
2381 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
2382 "failed_links=%d nr_active_links=%d\n",
2383 __func__, pp->delayed_eh_pmp_map,
2384 ap->qc_active, failed_links,
2385 ap->nr_active_links);
2386
2387 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2388 mv_process_crpb_entries(ap, pp);
2389 mv_stop_edma(ap);
2390 mv_eh_freeze(ap);
2391 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
2392 return 1; /* handled */
2393 }
2394 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
2395 return 1; /* handled */
2396 }
2397
2398 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2399 {
2400 /*
2401 * Possible future enhancement:
2402 *
2403 * FBS+non-NCQ operation is not yet implemented.
2404 * See related notes in mv_edma_cfg().
2405 *
2406 * Device error during FBS+non-NCQ operation:
2407 *
2408 * We need to snapshot the shadow registers for each failed command.
2409 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2410 */
2411 return 0; /* not handled */
2412 }
2413
2414 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2415 {
2416 struct mv_port_priv *pp = ap->private_data;
2417
2418 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2419 return 0; /* EDMA was not active: not handled */
2420 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2421 return 0; /* FBS was not active: not handled */
2422
2423 if (!(edma_err_cause & EDMA_ERR_DEV))
2424 return 0; /* non DEV error: not handled */
2425 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2426 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2427 return 0; /* other problems: not handled */
2428
2429 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2430 /*
2431 * EDMA should NOT have self-disabled for this case.
2432 * If it did, then something is wrong elsewhere,
2433 * and we cannot handle it here.
2434 */
2435 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2436 ata_port_printk(ap, KERN_WARNING,
2437 "%s: err_cause=0x%x pp_flags=0x%x\n",
2438 __func__, edma_err_cause, pp->pp_flags);
2439 return 0; /* not handled */
2440 }
2441 return mv_handle_fbs_ncq_dev_err(ap);
2442 } else {
2443 /*
2444 * EDMA should have self-disabled for this case.
2445 * If it did not, then something is wrong elsewhere,
2446 * and we cannot handle it here.
2447 */
2448 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2449 ata_port_printk(ap, KERN_WARNING,
2450 "%s: err_cause=0x%x pp_flags=0x%x\n",
2451 __func__, edma_err_cause, pp->pp_flags);
2452 return 0; /* not handled */
2453 }
2454 return mv_handle_fbs_non_ncq_dev_err(ap);
2455 }
2456 return 0; /* not handled */
2457 }
2458
2459 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2460 {
2461 struct ata_eh_info *ehi = &ap->link.eh_info;
2462 char *when = "idle";
2463
2464 ata_ehi_clear_desc(ehi);
2465 if (ap->flags & ATA_FLAG_DISABLED) {	/* ap cannot be NULL: ehi above derives from it */
2466 when = "disabled";
2467 } else if (edma_was_enabled) {
2468 when = "EDMA enabled";
2469 } else {
2470 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2471 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2472 when = "polling";
2473 }
2474 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2475 ehi->err_mask |= AC_ERR_OTHER;
2476 ehi->action |= ATA_EH_RESET;
2477 ata_port_freeze(ap);
2478 }
2479
2480 /**
2481 * mv_err_intr - Handle error interrupts on the port
2482 * @ap: ATA channel to manipulate
2483 *
2484 * Most cases require a full reset of the chip's state machine,
2485 * which also performs a COMRESET.
2486 * Also, if the port disabled DMA, update our cached copy to match.
2487 *
2488 * LOCKING:
2489 * Inherited from caller.
2490 */
2491 static void mv_err_intr(struct ata_port *ap)
2492 {
2493 void __iomem *port_mmio = mv_ap_base(ap);
2494 u32 edma_err_cause, eh_freeze_mask, serr = 0;
2495 u32 fis_cause = 0;
2496 struct mv_port_priv *pp = ap->private_data;
2497 struct mv_host_priv *hpriv = ap->host->private_data;
2498 unsigned int action = 0, err_mask = 0;
2499 struct ata_eh_info *ehi = &ap->link.eh_info;
2500 struct ata_queued_cmd *qc;
2501 int abort = 0;
2502
2503 /*
2504 * Read and clear the SError and err_cause bits.
2505 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2506 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
2507 */
2508 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2509 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2510
2511 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2512 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2513 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2514 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2515 }
2516 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2517
2518 if (edma_err_cause & EDMA_ERR_DEV) {
2519 /*
2520 * Device errors during FIS-based switching operation
2521 * require special handling.
2522 */
2523 if (mv_handle_dev_err(ap, edma_err_cause))
2524 return;
2525 }
2526
2527 qc = mv_get_active_qc(ap);
2528 ata_ehi_clear_desc(ehi);
2529 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2530 edma_err_cause, pp->pp_flags);
2531
2532 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2533 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2534 if (fis_cause & FIS_IRQ_CAUSE_AN) {
2535 u32 ec = edma_err_cause &
2536 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2537 sata_async_notification(ap);
2538 if (!ec)
2539 return; /* Just an AN; no need for the nukes */
2540 ata_ehi_push_desc(ehi, "SDB notify");
2541 }
2542 }
2543 /*
2544 * All generations share these EDMA error cause bits:
2545 */
2546 if (edma_err_cause & EDMA_ERR_DEV) {
2547 err_mask |= AC_ERR_DEV;
2548 action |= ATA_EH_RESET;
2549 ata_ehi_push_desc(ehi, "dev error");
2550 }
2551 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2552 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2553 EDMA_ERR_INTRL_PAR)) {
2554 err_mask |= AC_ERR_ATA_BUS;
2555 action |= ATA_EH_RESET;
2556 ata_ehi_push_desc(ehi, "parity error");
2557 }
2558 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2559 ata_ehi_hotplugged(ehi);
2560 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2561 "dev disconnect" : "dev connect");
2562 action |= ATA_EH_RESET;
2563 }
2564
2565 /*
2566 * Gen-I has a different SELF_DIS bit,
2567 * different FREEZE bits, and no SERR bit:
2568 */
2569 if (IS_GEN_I(hpriv)) {
2570 eh_freeze_mask = EDMA_EH_FREEZE_5;
2571 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2572 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2573 ata_ehi_push_desc(ehi, "EDMA self-disable");
2574 }
2575 } else {
2576 eh_freeze_mask = EDMA_EH_FREEZE;
2577 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2578 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2579 ata_ehi_push_desc(ehi, "EDMA self-disable");
2580 }
2581 if (edma_err_cause & EDMA_ERR_SERR) {
2582 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2583 err_mask |= AC_ERR_ATA_BUS;
2584 action |= ATA_EH_RESET;
2585 }
2586 }
2587
2588 if (!err_mask) {
2589 err_mask = AC_ERR_OTHER;
2590 action |= ATA_EH_RESET;
2591 }
2592
2593 ehi->serror |= serr;
2594 ehi->action |= action;
2595
2596 if (qc)
2597 qc->err_mask |= err_mask;
2598 else
2599 ehi->err_mask |= err_mask;
2600
2601 if (err_mask == AC_ERR_DEV) {
2602 /*
2603 * Cannot do ata_port_freeze() here,
2604 * because it would kill PIO access,
2605 * which is needed for further diagnosis.
2606 */
2607 mv_eh_freeze(ap);
2608 abort = 1;
2609 } else if (edma_err_cause & eh_freeze_mask) {
2610 /*
2611 * Note to self: ata_port_freeze() calls ata_port_abort()
2612 */
2613 ata_port_freeze(ap);
2614 } else {
2615 abort = 1;
2616 }
2617
2618 if (abort) {
2619 if (qc)
2620 ata_link_abort(qc->dev->link);
2621 else
2622 ata_port_abort(ap);
2623 }
2624 }
2625
2626 static void mv_process_crpb_response(struct ata_port *ap,
2627 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2628 {
2629 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
2630
2631 if (qc) {
2632 u8 ata_status;
2633 u16 edma_status = le16_to_cpu(response->flags);
2634 /*
2635 * edma_status from a response queue entry:
2636 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2637 * MSB is saved ATA status from command completion.
2638 */
2639 if (!ncq_enabled) {
2640 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2641 if (err_cause) {
2642 /*
2643 * Error will be seen/handled by mv_err_intr().
2644 * So do nothing at all here.
2645 */
2646 return;
2647 }
2648 }
2649 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2650 if (!ac_err_mask(ata_status))
2651 ata_qc_complete(qc);
2652 /* else: leave it for mv_err_intr() */
2653 } else {
2654 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
2655 __func__, tag);
2656 }
2657 }
2658
2659 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2660 {
2661 void __iomem *port_mmio = mv_ap_base(ap);
2662 struct mv_host_priv *hpriv = ap->host->private_data;
2663 u32 in_index;
2664 bool work_done = false;
2665 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2666
2667 /* Get the hardware queue position index */
2668 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2669 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2670
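/*
 * Example (illustrative): with MV_MAX_Q_DEPTH (32) slots, if the
 * hardware reports in_index == 5 while pp->resp_idx == 3, the loop
 * below consumes the CRPBs in slots 3 and 4, wrapping modulo
 * MV_MAX_Q_DEPTH as it advances.
 */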
2671 /* Process new responses since the last time we looked */
2672 while (in_index != pp->resp_idx) {
2673 unsigned int tag;
2674 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2675
2676 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2677
2678 if (IS_GEN_I(hpriv)) {
2679 /* 50xx: no NCQ, only one command active at a time */
2680 tag = ap->link.active_tag;
2681 } else {
2682 /* Gen II/IIE: get command tag from CRPB entry */
2683 tag = le16_to_cpu(response->id) & 0x1f;
2684 }
2685 mv_process_crpb_response(ap, response, tag, ncq_enabled);
2686 work_done = true;
2687 }
2688
2689 /* Update the software queue position index in hardware */
2690 if (work_done)
2691 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2692 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2693 port_mmio + EDMA_RSP_Q_OUT_PTR);
2694 }
2695
2696 static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2697 {
2698 struct mv_port_priv *pp;
2699 int edma_was_enabled;
2700
2701 if (ap->flags & ATA_FLAG_DISABLED) {	/* host->ports[] entries are never NULL */
2702 mv_unexpected_intr(ap, 0);
2703 return;
2704 }
2705 /*
2706 * Grab a snapshot of the EDMA_EN flag setting,
2707 * so that we have a consistent view for this port,
2708 * even if one of the routines we call changes it.
2709 */
2710 pp = ap->private_data;
2711 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2712 /*
2713 * Process completed CRPB response(s) before other events.
2714 */
2715 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2716 mv_process_crpb_entries(ap, pp);
2717 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2718 mv_handle_fbs_ncq_dev_err(ap);
2719 }
2720 /*
2721 * Handle chip-reported errors, or continue on to handle PIO.
2722 */
2723 if (unlikely(port_cause & ERR_IRQ)) {
2724 mv_err_intr(ap);
2725 } else if (!edma_was_enabled) {
2726 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2727 if (qc)
2728 ata_sff_host_intr(ap, qc);
2729 else
2730 mv_unexpected_intr(ap, edma_was_enabled);
2731 }
2732 }
2733
2734 /**
2735 * mv_host_intr - Handle all interrupts on the given host controller
2736 * @host: host specific structure
2737 * @main_irq_cause: Main interrupt cause register for the chip.
2738 *
2739 * LOCKING:
2740 * Inherited from caller.
2741 */
2742 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2743 {
2744 struct mv_host_priv *hpriv = host->private_data;
2745 void __iomem *mmio = hpriv->base, *hc_mmio;
2746 unsigned int handled = 0, port;
2747
2748 /* If asserted, clear the "all ports" IRQ coalescing bit */
2749 if (main_irq_cause & ALL_PORTS_COAL_DONE)
2750 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2751
2752 for (port = 0; port < hpriv->n_ports; port++) {
2753 struct ata_port *ap = host->ports[port];
2754 unsigned int p, shift, hardport, port_cause;
2755
2756 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2757 /*
2758 * Each hc within the host has its own hc_irq_cause register,
2759 * where the interrupting ports' bits get ack'd.
2760 */
2761 if (hardport == 0) { /* first port on this hc ? */
2762 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2763 u32 port_mask, ack_irqs;
2764 /*
2765 * Skip this entire hc if nothing pending for any ports
2766 */
2767 if (!hc_cause) {
2768 port += MV_PORTS_PER_HC - 1;
2769 continue;
2770 }
2771 /*
2772 * We don't need/want to read the hc_irq_cause register,
2773 * because doing so hurts performance, and
2774 * main_irq_cause already gives us everything we need.
2775 *
2776 * But we do have to *write* to the hc_irq_cause to ack
2777 * the ports that we are handling this time through.
2778 *
2779 * This requires that we create a bitmap for those
2780 * ports which interrupted us, and use that bitmap
2781 * to ack (only) those ports via hc_irq_cause.
2782 */
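/*
 * Example (illustrative): if hardport 1 of this hc signalled
 * completion, hc_cause has bits from (DONE_IRQ | ERR_IRQ) << 2 set,
 * and the loop below acks it as (DMA_IRQ | DEV_IRQ) << 1.
 */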
2783 ack_irqs = 0;
2784 if (hc_cause & PORTS_0_3_COAL_DONE)
2785 ack_irqs = HC_COAL_IRQ;
2786 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2787 if ((port + p) >= hpriv->n_ports)
2788 break;
2789 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2790 if (hc_cause & port_mask)
2791 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2792 }
2793 hc_mmio = mv_hc_base_from_port(mmio, port);
2794 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2795 handled = 1;
2796 }
2797 /*
2798 * Handle interrupts signalled for this port:
2799 */
2800 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2801 if (port_cause)
2802 mv_port_intr(ap, port_cause);
2803 }
2804 return handled;
2805 }
2806
2807 static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2808 {
2809 struct mv_host_priv *hpriv = host->private_data;
2810 struct ata_port *ap;
2811 struct ata_queued_cmd *qc;
2812 struct ata_eh_info *ehi;
2813 unsigned int i, err_mask, printed = 0;
2814 u32 err_cause;
2815
2816 err_cause = readl(mmio + hpriv->irq_cause_offset);
2817
2818 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
2819 err_cause);
2820
2821 DPRINTK("All regs @ PCI error\n");
2822 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2823
2824 writelfl(0, mmio + hpriv->irq_cause_offset);
2825
2826 for (i = 0; i < host->n_ports; i++) {
2827 ap = host->ports[i];
2828 if (!ata_link_offline(&ap->link)) {
2829 ehi = &ap->link.eh_info;
2830 ata_ehi_clear_desc(ehi);
2831 if (!printed++)
2832 ata_ehi_push_desc(ehi,
2833 "PCI err cause 0x%08x", err_cause);
2834 err_mask = AC_ERR_HOST_BUS;
2835 ehi->action = ATA_EH_RESET;
2836 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2837 if (qc)
2838 qc->err_mask |= err_mask;
2839 else
2840 ehi->err_mask |= err_mask;
2841
2842 ata_port_freeze(ap);
2843 }
2844 }
2845 return 1; /* handled */
2846 }
2847
2848 /**
2849 * mv_interrupt - Main interrupt event handler
2850 * @irq: unused
2851 * @dev_instance: private data; in this case the host structure
2852 *
2853 * Read the read only register to determine if any host
2854 * controllers have pending interrupts. If so, call lower level
2855 * routine to handle. Also check for PCI errors which are only
2856 * reported here.
2857 *
2858 * LOCKING:
2859 * This routine holds the host lock while processing pending
2860 * interrupts.
2861 */
2862 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
2863 {
2864 struct ata_host *host = dev_instance;
2865 struct mv_host_priv *hpriv = host->private_data;
2866 unsigned int handled = 0;
2867 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
2868 u32 main_irq_cause, pending_irqs;
2869
2870 spin_lock(&host->lock);
2871
2872 /* for MSI: block new interrupts while in here */
2873 if (using_msi)
2874 mv_write_main_irq_mask(0, hpriv);
2875
2876 main_irq_cause = readl(hpriv->main_irq_cause_addr);
2877 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
2878 /*
2879 * Deal with cases where we either have nothing pending, or have read
2880 * a bogus register value which can indicate HW removal or PCI fault.
2881 */
2882 if (pending_irqs && main_irq_cause != 0xffffffffU) {
2883 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
2884 handled = mv_pci_error(host, hpriv->base);
2885 else
2886 handled = mv_host_intr(host, pending_irqs);
2887 }
2888
2889 /* for MSI: unmask; interrupt cause bits will retrigger now */
2890 if (using_msi)
2891 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
2892
2893 spin_unlock(&host->lock);
2894
2895 return IRQ_RETVAL(handled);
2896 }
2897
2898 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
2899 {
2900 unsigned int ofs;
2901
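/*
 * SCR_STATUS, SCR_ERROR and SCR_CONTROL are 0, 1 and 2 in libata,
 * so the 50xx phy block lays them out at offsets 0x0, 0x4 and 0x8.
 */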
2902 switch (sc_reg_in) {
2903 case SCR_STATUS:
2904 case SCR_ERROR:
2905 case SCR_CONTROL:
2906 ofs = sc_reg_in * sizeof(u32);
2907 break;
2908 default:
2909 ofs = 0xffffffffU;
2910 break;
2911 }
2912 return ofs;
2913 }
2914
2915 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
2916 {
2917 struct mv_host_priv *hpriv = link->ap->host->private_data;
2918 void __iomem *mmio = hpriv->base;
2919 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
2920 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2921
2922 if (ofs != 0xffffffffU) {
2923 *val = readl(addr + ofs);
2924 return 0;
2925 } else
2926 return -EINVAL;
2927 }
2928
2929 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
2930 {
2931 struct mv_host_priv *hpriv = link->ap->host->private_data;
2932 void __iomem *mmio = hpriv->base;
2933 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
2934 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2935
2936 if (ofs != 0xffffffffU) {
2937 writelfl(val, addr + ofs);
2938 return 0;
2939 } else
2940 return -EINVAL;
2941 }
2942
2943 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
2944 {
2945 struct pci_dev *pdev = to_pci_dev(host->dev);
2946 int early_5080;
2947
2948 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
2949
2950 if (!early_5080) {
2951 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2952 tmp |= (1 << 0);
2953 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
2954 }
2955
2956 mv_reset_pci_bus(host, mmio);
2957 }
2958
2959 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2960 {
2961 writel(0x0fcfffff, mmio + FLASH_CTL);
2962 }
2963
2964 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
2965 void __iomem *mmio)
2966 {
2967 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
2968 u32 tmp;
2969
2970 tmp = readl(phy_mmio + MV5_PHY_MODE);
2971
2972 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
2973 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
2974 }
2975
2976 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2977 {
2978 u32 tmp;
2979
2980 writel(0, mmio + GPIO_PORT_CTL);
2981
2982 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
2983
2984 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2985 tmp |= ~(1 << 0);
2986 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
2987 }
2988
2989 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2990 unsigned int port)
2991 {
2992 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
2993 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
2994 u32 tmp;
2995 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
2996
2997 if (fix_apm_sq) {
2998 tmp = readl(phy_mmio + MV5_LTMODE);
2999 tmp |= (1 << 19);
3000 writel(tmp, phy_mmio + MV5_LTMODE);
3001
3002 tmp = readl(phy_mmio + MV5_PHY_CTL);
3003 tmp &= ~0x3;
3004 tmp |= 0x1;
3005 writel(tmp, phy_mmio + MV5_PHY_CTL);
3006 }
3007
3008 tmp = readl(phy_mmio + MV5_PHY_MODE);
3009 tmp &= ~mask;
3010 tmp |= hpriv->signal[port].pre;
3011 tmp |= hpriv->signal[port].amps;
3012 writel(tmp, phy_mmio + MV5_PHY_MODE);
3013 }
3014
3015
3016 #undef ZERO
3017 #define ZERO(reg) writel(0, port_mmio + (reg))
3018 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3019 unsigned int port)
3020 {
3021 void __iomem *port_mmio = mv_port_base(mmio, port);
3022
3023 mv_reset_channel(hpriv, mmio, port);
3024
3025 ZERO(0x028); /* command */
3026 writel(0x11f, port_mmio + EDMA_CFG);
3027 ZERO(0x004); /* timer */
3028 ZERO(0x008); /* irq err cause */
3029 ZERO(0x00c); /* irq err mask */
3030 ZERO(0x010); /* rq bah */
3031 ZERO(0x014); /* rq inp */
3032 ZERO(0x018); /* rq outp */
3033 ZERO(0x01c); /* respq bah */
3034 ZERO(0x024); /* respq outp */
3035 ZERO(0x020); /* respq inp */
3036 ZERO(0x02c); /* test control */
3037 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3038 }
3039 #undef ZERO
3040
3041 #define ZERO(reg) writel(0, hc_mmio + (reg))
3042 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3043 unsigned int hc)
3044 {
3045 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3046 u32 tmp;
3047
3048 ZERO(0x00c);
3049 ZERO(0x010);
3050 ZERO(0x014);
3051 ZERO(0x018);
3052
3053 tmp = readl(hc_mmio + 0x20);
3054 tmp &= 0x1c1c1c1c;
3055 tmp |= 0x03030303;
3056 writel(tmp, hc_mmio + 0x20);
3057 }
3058 #undef ZERO
3059
3060 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3061 unsigned int n_hc)
3062 {
3063 unsigned int hc, port;
3064
3065 for (hc = 0; hc < n_hc; hc++) {
3066 for (port = 0; port < MV_PORTS_PER_HC; port++)
3067 mv5_reset_hc_port(hpriv, mmio,
3068 (hc * MV_PORTS_PER_HC) + port);
3069
3070 mv5_reset_one_hc(hpriv, mmio, hc);
3071 }
3072
3073 return 0;
3074 }
3075
3076 #undef ZERO
3077 #define ZERO(reg) writel(0, mmio + (reg))
3078 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3079 {
3080 struct mv_host_priv *hpriv = host->private_data;
3081 u32 tmp;
3082
3083 tmp = readl(mmio + MV_PCI_MODE);
3084 tmp &= 0xff00ffff;
3085 writel(tmp, mmio + MV_PCI_MODE);
3086
3087 ZERO(MV_PCI_DISC_TIMER);
3088 ZERO(MV_PCI_MSI_TRIGGER);
3089 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3090 ZERO(MV_PCI_SERR_MASK);
3091 ZERO(hpriv->irq_cause_offset);
3092 ZERO(hpriv->irq_mask_offset);
3093 ZERO(MV_PCI_ERR_LOW_ADDRESS);
3094 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3095 ZERO(MV_PCI_ERR_ATTRIBUTE);
3096 ZERO(MV_PCI_ERR_COMMAND);
3097 }
3098 #undef ZERO
3099
3100 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3101 {
3102 u32 tmp;
3103
3104 mv5_reset_flash(hpriv, mmio);
3105
3106 tmp = readl(mmio + GPIO_PORT_CTL);
3107 tmp &= 0x3;
3108 tmp |= (1 << 5) | (1 << 6);
3109 writel(tmp, mmio + GPIO_PORT_CTL);
3110 }
3111
3112 /**
3113 * mv6_reset_hc - Perform the 6xxx global soft reset
3114 * @mmio: base address of the HBA
3115 *
3116 * This routine only applies to 6xxx parts.
3117 *
3118 * LOCKING:
3119 * Inherited from caller.
3120 */
3121 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3122 unsigned int n_hc)
3123 {
3124 void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3125 int i, rc = 0;
3126 u32 t;
3127
3128 /* Following procedure defined in PCI "main command and status
3129 * register" table.
3130 */
3131 t = readl(reg);
3132 writel(t | STOP_PCI_MASTER, reg);
3133
3134 for (i = 0; i < 1000; i++) {
3135 udelay(1);
3136 t = readl(reg);
3137 if (PCI_MASTER_EMPTY & t)
3138 break;
3139 }
3140 if (!(PCI_MASTER_EMPTY & t)) {
3141 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3142 rc = 1;
3143 goto done;
3144 }
3145
3146 /* set reset */
3147 i = 5;
3148 do {
3149 writel(t | GLOB_SFT_RST, reg);
3150 t = readl(reg);
3151 udelay(1);
3152 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3153
3154 if (!(GLOB_SFT_RST & t)) {
3155 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3156 rc = 1;
3157 goto done;
3158 }
3159
3160 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
3161 i = 5;
3162 do {
3163 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3164 t = readl(reg);
3165 udelay(1);
3166 } while ((GLOB_SFT_RST & t) && (i-- > 0));
3167
3168 if (GLOB_SFT_RST & t) {
3169 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3170 rc = 1;
3171 }
3172 done:
3173 return rc;
3174 }
3175
3176 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3177 void __iomem *mmio)
3178 {
3179 void __iomem *port_mmio;
3180 u32 tmp;
3181
3182 tmp = readl(mmio + RESET_CFG);
3183 if ((tmp & (1 << 0)) == 0) {
3184 hpriv->signal[idx].amps = 0x7 << 8;
3185 hpriv->signal[idx].pre = 0x1 << 5;
3186 return;
3187 }
3188
3189 port_mmio = mv_port_base(mmio, idx);
3190 tmp = readl(port_mmio + PHY_MODE2);
3191
3192 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3193 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3194 }
3195
3196 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3197 {
3198 writel(0x00000060, mmio + GPIO_PORT_CTL);
3199 }
3200
3201 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3202 unsigned int port)
3203 {
3204 void __iomem *port_mmio = mv_port_base(mmio, port);
3205
3206 u32 hp_flags = hpriv->hp_flags;
3207 int fix_phy_mode2 =
3208 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3209 int fix_phy_mode4 =
3210 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3211 u32 m2, m3;
3212
3213 if (fix_phy_mode2) {
3214 m2 = readl(port_mmio + PHY_MODE2);
3215 m2 &= ~(1 << 16);
3216 m2 |= (1 << 31);
3217 writel(m2, port_mmio + PHY_MODE2);
3218
3219 udelay(200);
3220
3221 m2 = readl(port_mmio + PHY_MODE2);
3222 m2 &= ~((1 << 16) | (1 << 31));
3223 writel(m2, port_mmio + PHY_MODE2);
3224
3225 udelay(200);
3226 }
3227
3228 /*
3229 * Gen-II/IIe PHY_MODE3 errata RM#2:
3230 * Achieves better receiver noise performance than the h/w default:
3231 */
3232 m3 = readl(port_mmio + PHY_MODE3);
3233 m3 = (m3 & 0x1f) | (0x5555601 << 5);
3234
3235 /* Guideline 88F5182 (GL# SATA-S11) */
3236 if (IS_SOC(hpriv))
3237 m3 &= ~0x1c;
3238
3239 if (fix_phy_mode4) {
3240 u32 m4 = readl(port_mmio + PHY_MODE4);
3241 /*
3242 * Enforce reserved-bit restrictions on GenIIe devices only.
3243 * For earlier chipsets, force only the internal config field
3244 * (workaround for errata FEr SATA#10 part 1).
3245 */
3246 if (IS_GEN_IIE(hpriv))
3247 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3248 else
3249 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3250 writel(m4, port_mmio + PHY_MODE4);
3251 }
3252 /*
3253 * Workaround for 60x1-B2 errata SATA#13:
3254 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3255 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3256 * Or ensure we use writelfl() when writing PHY_MODE4.
3257 */
3258 writel(m3, port_mmio + PHY_MODE3);
3259
3260 /* Revert values of pre-emphasis and signal amps to the saved ones */
3261 m2 = readl(port_mmio + PHY_MODE2);
3262
3263 m2 &= ~MV_M2_PREAMP_MASK;
3264 m2 |= hpriv->signal[port].amps;
3265 m2 |= hpriv->signal[port].pre;
3266 m2 &= ~(1 << 16);
3267
3268 /* according to mvSata 3.6.1, some IIE values are fixed */
3269 if (IS_GEN_IIE(hpriv)) {
3270 m2 &= ~0xC30FF01F;
3271 m2 |= 0x0000900F;
3272 }
3273
3274 writel(m2, port_mmio + PHY_MODE2);
3275 }
3276
3277 /* TODO: use the generic LED interface to configure the SATA Presence */
3278 /* & Activity LEDs on the board */
3279 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3280 void __iomem *mmio)
3281 {
3282 return;
3283 }
3284
3285 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3286 void __iomem *mmio)
3287 {
3288 void __iomem *port_mmio;
3289 u32 tmp;
3290
3291 port_mmio = mv_port_base(mmio, idx);
3292 tmp = readl(port_mmio + PHY_MODE2);
3293
3294 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3295 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3296 }
3297
3298 #undef ZERO
3299 #define ZERO(reg) writel(0, port_mmio + (reg))
3300 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3301 void __iomem *mmio, unsigned int port)
3302 {
3303 void __iomem *port_mmio = mv_port_base(mmio, port);
3304
3305 mv_reset_channel(hpriv, mmio, port);
3306
3307 ZERO(0x028); /* command */
3308 writel(0x101f, port_mmio + EDMA_CFG);
3309 ZERO(0x004); /* timer */
3310 ZERO(0x008); /* irq err cause */
3311 ZERO(0x00c); /* irq err mask */
3312 ZERO(0x010); /* rq bah */
3313 ZERO(0x014); /* rq inp */
3314 ZERO(0x018); /* rq outp */
3315 ZERO(0x01c); /* respq bah */
3316 ZERO(0x024); /* respq outp */
3317 ZERO(0x020); /* respq inp */
3318 ZERO(0x02c); /* test control */
3319 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3320 }
3321
3322 #undef ZERO
3323
3324 #define ZERO(reg) writel(0, hc_mmio + (reg))
3325 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3326 void __iomem *mmio)
3327 {
3328 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3329
3330 ZERO(0x00c);
3331 ZERO(0x010);
3332 ZERO(0x014);
3334 }
3335
3336 #undef ZERO
3337
3338 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3339 void __iomem *mmio, unsigned int n_hc)
3340 {
3341 unsigned int port;
3342
3343 for (port = 0; port < hpriv->n_ports; port++)
3344 mv_soc_reset_hc_port(hpriv, mmio, port);
3345
3346 mv_soc_reset_one_hc(hpriv, mmio);
3347
3348 return 0;
3349 }
3350
3351 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3352 void __iomem *mmio)
3353 {
3354 return;
3355 }
3356
3357 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3358 {
3359 return;
3360 }
3361
3362 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3363 {
3364 u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3365
3366 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
3367 if (want_gen2i)
3368 ifcfg |= (1 << 7); /* enable gen2i speed */
3369 writelfl(ifcfg, port_mmio + SATA_IFCFG);
3370 }
3371
3372 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3373 unsigned int port_no)
3374 {
3375 void __iomem *port_mmio = mv_port_base(mmio, port_no);
3376
3377 /*
3378 * The datasheet warns against setting EDMA_RESET when EDMA is active
3379 * (but doesn't say what the problem might be). So we first try
3380 * to disable the EDMA engine before doing the EDMA_RESET operation.
3381 */
3382 mv_stop_edma_engine(port_mmio);
3383 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3384
3385 if (!IS_GEN_I(hpriv)) {
3386 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3387 mv_setup_ifcfg(port_mmio, 1);
3388 }
3389 /*
3390 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
3391 * link, and physical layers. It resets all SATA interface registers
3392 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
3393 */
3394 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3395 udelay(25); /* allow reset propagation */
3396 writelfl(0, port_mmio + EDMA_CMD);
3397
3398 hpriv->ops->phy_errata(hpriv, mmio, port_no);
3399
3400 if (IS_GEN_I(hpriv))
3401 mdelay(1);
3402 }
3403
3404 static void mv_pmp_select(struct ata_port *ap, int pmp)
3405 {
3406 if (sata_pmp_supported(ap)) {
3407 void __iomem *port_mmio = mv_ap_base(ap);
3408 u32 reg = readl(port_mmio + SATA_IFCTL);
3409 int old = reg & 0xf;
3410
3411 if (old != pmp) {
3412 reg = (reg & ~0xf) | pmp;
3413 writelfl(reg, port_mmio + SATA_IFCTL);
3414 }
3415 }
3416 }
3417
3418 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3419 unsigned long deadline)
3420 {
3421 mv_pmp_select(link->ap, sata_srst_pmp(link));
3422 return sata_std_hardreset(link, class, deadline);
3423 }
3424
3425 static int mv_softreset(struct ata_link *link, unsigned int *class,
3426 unsigned long deadline)
3427 {
3428 mv_pmp_select(link->ap, sata_srst_pmp(link));
3429 return ata_sff_softreset(link, class, deadline);
3430 }
3431
3432 static int mv_hardreset(struct ata_link *link, unsigned int *class,
3433 unsigned long deadline)
3434 {
3435 struct ata_port *ap = link->ap;
3436 struct mv_host_priv *hpriv = ap->host->private_data;
3437 struct mv_port_priv *pp = ap->private_data;
3438 void __iomem *mmio = hpriv->base;
3439 int rc, attempts = 0, extra = 0;
3440 u32 sstatus;
3441 bool online;
3442
3443 mv_reset_channel(hpriv, mmio, ap->port_no);
3444 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3445 pp->pp_flags &=
3446 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3447
3448 /* Workaround for errata FEr SATA#10 (part 2) */
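/*
 * SStatus decode for the magic numbers tested below, per the standard
 * SATA SStatus layout (DET in bits 3:0, SPD in 7:4, IPM in 11:8):
 * 0x113/0x123 mean device present with phy communication up at
 * 1.5/3.0 Gbps; 0x121 means a device was sensed at 3.0 Gbps without
 * phy communication, in which case we force 1.5 Gbps and retry.
 */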
3449 do {
3450 const unsigned long *timing =
3451 sata_ehc_deb_timing(&link->eh_context);
3452
3453 rc = sata_link_hardreset(link, timing, deadline + extra,
3454 &online, NULL);
3455 rc = online ? -EAGAIN : rc;
3456 if (rc)
3457 return rc;
3458 sata_scr_read(link, SCR_STATUS, &sstatus);
3459 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3460 /* Force 1.5gb/s link speed and try again */
3461 mv_setup_ifcfg(mv_ap_base(ap), 0);
3462 if (time_after(jiffies + HZ, deadline))
3463 extra = HZ; /* only extend it once, max */
3464 }
3465 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
3466 mv_save_cached_regs(ap);
3467 mv_edma_cfg(ap, 0, 0);
3468
3469 return rc;
3470 }
3471
3472 static void mv_eh_freeze(struct ata_port *ap)
3473 {
3474 mv_stop_edma(ap);
3475 mv_enable_port_irqs(ap, 0);
3476 }
3477
3478 static void mv_eh_thaw(struct ata_port *ap)
3479 {
3480 struct mv_host_priv *hpriv = ap->host->private_data;
3481 unsigned int port = ap->port_no;
3482 unsigned int hardport = mv_hardport_from_port(port);
3483 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3484 void __iomem *port_mmio = mv_ap_base(ap);
3485 u32 hc_irq_cause;
3486
3487 /* clear EDMA errors on this port */
3488 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3489
3490 /* clear pending irq events */
3491 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3492 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
3493
3494 mv_enable_port_irqs(ap, ERR_IRQ);
3495 }
3496
3497 /**
3498 * mv_port_init - Perform some early initialization on a single port.
3499 * @port: libata data structure storing shadow register addresses
3500 * @port_mmio: base address of the port
3501 *
3502 * Initialize shadow register mmio addresses, clear outstanding
3503 * interrupts on the port, and unmask interrupts for the future
3504 * start of the port.
3505 *
3506 * LOCKING:
3507 * Inherited from caller.
3508 */
3509 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3510 {
3511 void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3512
3513 /* PIO related setup
3514 */
3515 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3516 port->error_addr =
3517 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3518 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3519 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3520 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3521 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3522 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3523 port->status_addr =
3524 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3525 /* special case: control/altstatus doesn't have ATA_REG_ address */
3526 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3527
3528 /* unused: */
3529 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
3530
3531 /* Clear any currently outstanding port interrupt conditions */
3532 serr = port_mmio + mv_scr_offset(SCR_ERROR);
3533 writelfl(readl(serr), serr);
3534 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3535
3536 /* unmask all non-transient EDMA error interrupts */
3537 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3538
3539 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3540 readl(port_mmio + EDMA_CFG),
3541 readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3542 readl(port_mmio + EDMA_ERR_IRQ_MASK));
3543 }
3544
3545 static unsigned int mv_in_pcix_mode(struct ata_host *host)
3546 {
3547 struct mv_host_priv *hpriv = host->private_data;
3548 void __iomem *mmio = hpriv->base;
3549 u32 reg;
3550
3551 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3552 return 0; /* not PCI-X capable */
	reg = readl(mmio + MV_PCI_MODE);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* conventional PCI mode */
	return 1;	/* chip is in PCI-X mode */
}

static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + MV_PCI_COMMAND);
		if (reg & MV_PCI_COMMAND_MRDTRIG)
			return 0; /* not okay */
	}
	return 1; /* okay */
}

static void mv_60x1b2_errata_pci7(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	/* workaround for 60x1-B2 errata PCI#7 */
	if (mv_in_pcix_mode(host)) {
		u32 reg = readl(mmio + MV_PCI_COMMAND);
		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
	}
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			mv_60x1b2_errata_pci7(host);
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata. This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
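			 * For example, on a hypothetical 976773168-sector
			 * (~500 GB) drive, 976773168 & ~0xfffff = 976224256,
			 * leaving the metadata block roughly 280 MB below
			 * the end of the disk.
			 *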
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x2: /* Rev.B0: the first/only public release */
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
			MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
		hpriv->irq_mask_offset = PCIE_IRQ_MASK;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
		hpriv->irq_mask_offset = PCI_IRQ_MASK;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
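
/*
 * Illustrative sketch (hypothetical, kept inside a comment): the
 * generation and errata bits committed above gate quirks elsewhere
 * in the driver, along these lines:
 *
 *	if (IS_GEN_I(hpriv))
 *		...		(50xx Gen-I handling)
 *	else if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
 *		...		(60x1-C0 / 6042 / 7042 workarounds)
 */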

/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host. Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
	}

	/* initialize shadow irq mask with register's value */
	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (!IS_SOC(hpriv)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG),
			readl(hc_mmio + HC_IRQ_CAUSE));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_offset);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
	}

	/*
	 * enable only global host interrupts for now.
	 * The per-port interrupts get done later as ports are set up.
	 */
	mv_set_main_irq_mask(host, 0, PCI_ERR);
	mv_set_irq_coalescing(host, irq_coalescing_io_count,
			      irq_coalescing_usecs);
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
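
/*
 * Illustrative sketch (hypothetical fragment): per-port setup later
 * carves fixed-size buffers out of these managed pools, roughly:
 *
 *	dma_addr_t crqb_dma;
 *	void *crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL,
 *				    &crqb_dma);
 *	if (!crqb)
 *		return -ENOMEM;
 *	memset(crqb, 0, MV_CRQB_Q_SZ);
 */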

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}
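
/*
 * Worked example (hypothetical numbers): a 256 MB chip-select with
 * mbus_attr 0x0e and target id 0 would program WINDOW_CTRL as:
 *
 *	((0x10000000 - 1) & 0xffff0000)	= 0x0fff0000	(size field)
 *	| (0x0e << 8)			= 0x00000e00	(attributes)
 *	| (0 << 4)			= 0x00000000	(target id)
 *	| 1						(window enable)
 *					 -> 0x0fff0e01
 */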

/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell host
 * @pdev: platform device found
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ...
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   resource_size(res));
	if (!hpriv->base)
		return -ENOMEM;
	hpriv->base -= SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}
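
/*
 * Hypothetical board-file fragment (for illustration only): the
 * platform data consumed above needs little more than a port count;
 * .dram is optional and, when set, triggers the MBUS window
 * reprogramming done in mv_conf_mbus_windows():
 *
 *	static struct mv_sata_platform_data board_sata_data = {
 *		.n_ports = 2,
 *	};
 */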

/**
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged. Perform the needed
 * cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe = mv_platform_probe,
	.remove = __devexit_p(mv_platform_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


static struct pci_driver mv_pci_driver = {
	.name = DRV_NAME,
	.id_table = mv_pci_tbl,
	.probe = mv_pci_init_one,
	.remove = ata_pci_remove_one,
};

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
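
/*
 * Aside (sketch only, not applicable to this tree): later kernels
 * collapse the fallback dance above into the consolidated DMA API:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		rc = dma_set_mask_and_coherent(&pdev->dev,
 *					       DMA_BIT_MASK(32));
 */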

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the device's SCSI class code (SCC), so we can report
	 * whether the chip presents itself as a SCSI or a RAID controller
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable message-signaled interrupts (MSI), if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(mv_init);
module_exit(mv_exit);