1 | /* |
2 | * drivers/ata/sata_dwc_460ex.c | |
3 | * | |
4 | * Synopsys DesignWare Cores (DWC) SATA host driver | |
5 | * | |
6 | * Author: Mark Miesfeld <mmiesfeld@amcc.com> | |
7 | * | |
8 | * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de> | |
9 | * Copyright 2008 DENX Software Engineering | |
10 | * | |
11 | * Based on versions provided by AMCC and Synopsys which are: | |
12 | * Copyright 2006 Applied Micro Circuits Corporation | |
13 | * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED | |
14 | * | |
15 | * This program is free software; you can redistribute it and/or modify it | |
16 | * under the terms of the GNU General Public License as published by the | |
17 | * Free Software Foundation; either version 2 of the License, or (at your | |
18 | * option) any later version. | |
19 | */ | |
20 | ||
21 | #ifdef CONFIG_SATA_DWC_DEBUG | |
22 | #define DEBUG | |
23 | #endif | |
24 | ||
25 | #ifdef CONFIG_SATA_DWC_VDEBUG | |
26 | #define VERBOSE_DEBUG | |
27 | #define DEBUG_NCQ | |
28 | #endif | |
29 | ||
30 | #include <linux/kernel.h> | |
31 | #include <linux/module.h> | |
32 | #include <linux/init.h> | |
33 | #include <linux/device.h> | |
34 | #include <linux/of_platform.h> | |
35 | #include <linux/platform_device.h> | |
36 | #include <linux/libata.h> | |
37 | #include <linux/slab.h> | |
38 | #include "libata.h" | |
39 | ||
40 | #include <scsi/scsi_host.h> | |
41 | #include <scsi/scsi_cmnd.h> | |
42 | ||
43 | #define DRV_NAME "sata-dwc" | |
44 | #define DRV_VERSION "1.0" | |
45 | ||
46 | /* SATA DMA driver Globals */ | |
47 | #define DMA_NUM_CHANS 1 | |
48 | #define DMA_NUM_CHAN_REGS 8 | |
49 | ||
50 | /* SATA DMA Register definitions */ | |
51 | #define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/ | |
52 | ||
53 | struct dmareg { | |
54 | u32 low; /* Low bits 0-31 */ | |
55 | u32 high; /* High bits 32-63 */ | |
56 | }; | |
57 | ||
58 | /* DMA Per Channel registers */ | |
59 | struct dma_chan_regs { | |
60 | struct dmareg sar; /* Source Address */ | |
61 | struct dmareg dar; /* Destination address */ | |
62 | struct dmareg llp; /* Linked List Pointer */ | |
63 | struct dmareg ctl; /* Control */ | |
64 | struct dmareg sstat; /* Source Status not implemented in core */ | |
65 | struct dmareg dstat; /* Destination Status not implemented in core*/ | |
66 | struct dmareg sstatar; /* Source Status Address not impl in core */ | |
67 | struct dmareg dstatar; /* Destination Status Address not implemented */ |
68 | struct dmareg cfg; /* Config */ | |
69 | struct dmareg sgr; /* Source Gather */ | |
70 | struct dmareg dsr; /* Destination Scatter */ | |
71 | }; | |
72 | ||
73 | /* Generic Interrupt Registers */ | |
74 | struct dma_interrupt_regs { | |
75 | struct dmareg tfr; /* Transfer Interrupt */ | |
76 | struct dmareg block; /* Block Interrupt */ | |
77 | struct dmareg srctran; /* Source Transfer Interrupt */ | |
78 | struct dmareg dsttran; /* Dest Transfer Interrupt */ | |
79 | struct dmareg error; /* Error */ | |
80 | }; | |
81 | ||
82 | struct ahb_dma_regs { | |
83 | struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS]; | |
84 | struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */ | |
85 | struct dma_interrupt_regs interrupt_status; /* Interrupt Status */ | |
86 | struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */ | |
87 | struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */ | |
88 | struct dmareg statusInt; /* Interrupt combined*/ | |
89 | struct dmareg rq_srcreg; /* Src Trans Req */ | |
90 | struct dmareg rq_dstreg; /* Dst Trans Req */ | |
91 | struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req*/ | |
92 | struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req*/ | |
93 | struct dmareg rq_lst_srcreg; /* Last Src Trans Req*/ | |
94 | struct dmareg rq_lst_dstreg; /* Last Dst Trans Req*/ | |
95 | struct dmareg dma_cfg; /* DMA Config */ | |
96 | struct dmareg dma_chan_en; /* DMA Channel Enable*/ | |
97 | struct dmareg dma_id; /* DMA ID */ | |
98 | struct dmareg dma_test; /* DMA Test */ | |
99 | struct dmareg res1; /* reserved */ | |
100 | struct dmareg res2; /* reserved */ | |
101 | /* | |
102 | * DMA Comp Params | |
103 | * Param 6 = dma_param[0], Param 5 = dma_param[1], | |
104 | * Param 4 = dma_param[2] ... | |
105 | */ | |
106 | struct dmareg dma_params[6]; | |
107 | }; | |
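/*
 * Illustrative helper, not part of the original file: the comment above says
 * the component parameter registers are laid out highest-numbered first, so
 * DWC parameter register N (6..1) would be found at dma_params[6 - N].
 */
#define DMA_COMP_PARAM_IDX(n)	(6 - (n))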
108 | ||
109 | /* Data structure for linked list item */ | |
110 | struct lli { | |
111 | u32 sar; /* Source Address */ | |
112 | u32 dar; /* Destination address */ | |
113 | u32 llp; /* Linked List Pointer */ | |
114 | struct dmareg ctl; /* Control */ | |
115 | struct dmareg dstat; /* Destination Status */ | |
116 | }; | |
117 | ||
118 | enum { | |
119 | SATA_DWC_DMAC_LLI_SZ = (sizeof(struct lli)), | |
120 | SATA_DWC_DMAC_LLI_NUM = 256, | |
121 | SATA_DWC_DMAC_LLI_TBL_SZ = (SATA_DWC_DMAC_LLI_SZ * \ | |
122 | SATA_DWC_DMAC_LLI_NUM), | |
123 | SATA_DWC_DMAC_TWIDTH_BYTES = 4, | |
124 | SATA_DWC_DMAC_CTRL_TSIZE_MAX = (0x00000800 * \ | |
125 | SATA_DWC_DMAC_TWIDTH_BYTES), | |
126 | }; | |
127 | ||
128 | /* DMA Register Operation Bits */ | |
129 | enum { | |
130 | DMA_EN = 0x00000001, /* Enable AHB DMA */ | |
131 | DMA_CTL_LLP_SRCEN = 0x10000000, /* Blk chain enable Src */ | |
132 | DMA_CTL_LLP_DSTEN = 0x08000000, /* Blk chain enable Dst */ | |
133 | }; | |
134 | ||
135 | #define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */ | |
136 | #define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */ | |
137 | /* Enable channel */ | |
138 | #define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \ | |
139 | ((0x000000001 << (ch)) << 8)) | |
140 | /* Disable channel */ | |
141 | #define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8)) | |
142 | /* Transfer Type & Flow Controller */ | |
143 | #define DMA_CTL_TTFC(type) (((type) & 0x7) << 20) | |
144 | #define DMA_CTL_SMS(num) (((num) & 0x3) << 25) /* Src Master Select */ | |
145 | #define DMA_CTL_DMS(num) (((num) & 0x3) << 23)/* Dst Master Select */ | |
146 | /* Src Burst Transaction Length */ | |
147 | #define DMA_CTL_SRC_MSIZE(size) (((size) & 0x7) << 14) | |
148 | /* Dst Burst Transaction Length */ | |
149 | #define DMA_CTL_DST_MSIZE(size) (((size) & 0x7) << 11) | |
150 | /* Source Transfer Width */ | |
151 | #define DMA_CTL_SRC_TRWID(size) (((size) & 0x7) << 4) | |
152 | /* Destination Transfer Width */ | |
153 | #define DMA_CTL_DST_TRWID(size) (((size) & 0x7) << 1) | |
154 | ||
155 | /* Assign HW handshaking interface (x) to destination / source peripheral */ | |
156 | #define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11) | |
157 | #define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7) | |
158 | #define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master)) | |
159 | ||
160 | /* | |
161 | * This define is used to set block chaining disabled in the control low | |
162 | * register. It is already in little endian format so it can be &'d directly. |
163 | * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN)) | |
164 | */ | |
165 | enum { | |
166 | DMA_CTL_LLP_DISABLE_LE32 = 0xffffffe7, | |
167 | DMA_CTL_TTFC_P2M_DMAC = 0x00000002, /* Per to mem, DMAC cntr */ | |
168 | DMA_CTL_TTFC_M2P_PER = 0x00000003, /* Mem to per, peripheral cntr */ | |
169 | DMA_CTL_SINC_INC = 0x00000000, /* Source Address Increment */ | |
170 | DMA_CTL_SINC_DEC = 0x00000200, | |
171 | DMA_CTL_SINC_NOCHANGE = 0x00000400, | |
172 | DMA_CTL_DINC_INC = 0x00000000, /* Destination Address Increment */ | |
173 | DMA_CTL_DINC_DEC = 0x00000080, | |
174 | DMA_CTL_DINC_NOCHANGE = 0x00000100, | |
175 | DMA_CTL_INT_EN = 0x00000001, /* Interrupt Enable */ | |
176 | ||
177 | /* Channel Configuration Register high bits */ | |
178 | DMA_CFG_FCMOD_REQ = 0x00000001, /* Flow Control - request based */ | |
179 | DMA_CFG_PROTCTL = (0x00000003 << 2),/* Protection Control */ | |
180 | ||
181 | /* Channel Configuration Register low bits */ | |
182 | DMA_CFG_RELD_DST = 0x80000000, /* Reload Dest / Src Addr */ | |
183 | DMA_CFG_RELD_SRC = 0x40000000, | |
184 | DMA_CFG_HS_SELSRC = 0x00000800, /* Software handshake Src/ Dest */ | |
185 | DMA_CFG_HS_SELDST = 0x00000400, | |
186 | DMA_CFG_FIFOEMPTY = (0x00000001 << 9), /* FIFO Empty bit */ | |
187 | ||
188 | /* Channel Linked List Pointer Register */ | |
189 | DMA_LLP_AHBMASTER1 = 0, /* List Master Select */ | |
190 | DMA_LLP_AHBMASTER2 = 1, | |
191 | ||
192 | SATA_DWC_MAX_PORTS = 1, | |
193 | ||
194 | SATA_DWC_SCR_OFFSET = 0x24, | |
195 | SATA_DWC_REG_OFFSET = 0x64, | |
196 | }; | |
197 | ||
198 | /* DWC SATA Registers */ | |
199 | struct sata_dwc_regs { | |
200 | u32 fptagr; /* 1st party DMA tag */ | |
201 | u32 fpbor; /* 1st party DMA buffer offset */ | |
202 | u32 fptcr; /* 1st party DMA Xfr count */ | |
203 | u32 dmacr; /* DMA Control */ | |
204 | u32 dbtsr; /* DMA Burst Transac size */ | |
205 | u32 intpr; /* Interrupt Pending */ | |
206 | u32 intmr; /* Interrupt Mask */ | |
207 | u32 errmr; /* Error Mask */ | |
208 | u32 llcr; /* Link Layer Control */ | |
209 | u32 phycr; /* PHY Control */ | |
210 | u32 physr; /* PHY Status */ | |
211 | u32 rxbistpd; /* Recvd BIST pattern def register */ | |
212 | u32 rxbistpd1; /* Recvd BIST data dword1 */ | |
213 | u32 rxbistpd2; /* Recvd BIST pattern data dword2 */ | |
214 | u32 txbistpd; /* Trans BIST pattern def register */ | |
215 | u32 txbistpd1; /* Trans BIST data dword1 */ | |
216 | u32 txbistpd2; /* Trans BIST data dword2 */ | |
217 | u32 bistcr; /* BIST Control Register */ | |
218 | u32 bistfctr; /* BIST FIS Count Register */ | |
219 | u32 bistsr; /* BIST Status Register */ | |
220 | u32 bistdecr; /* BIST Dword Error count register */ | |
221 | u32 res[15]; /* Reserved locations */ | |
222 | u32 testr; /* Test Register */ | |
223 | u32 versionr; /* Version Register */ | |
224 | u32 idr; /* ID Register */ | |
225 | u32 unimpl[192]; /* Unimplemented */ | |
226 | u32 dmadr[256]; /* FIFO Locations in DMA Mode */ | |
227 | }; | |
228 | ||
229 | enum { | |
230 | SCR_SCONTROL_DET_ENABLE = 0x00000001, | |
231 | SCR_SSTATUS_DET_PRESENT = 0x00000001, | |
232 | SCR_SERROR_DIAG_X = 0x04000000, | |
233 | /* DWC SATA Register Operations */ | |
234 | SATA_DWC_TXFIFO_DEPTH = 0x01FF, | |
235 | SATA_DWC_RXFIFO_DEPTH = 0x01FF, | |
236 | SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004, | |
237 | SATA_DWC_DMACR_TXCHEN = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN), | |
238 | SATA_DWC_DMACR_RXCHEN = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN), | |
239 | SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN, | |
240 | SATA_DWC_INTPR_DMAT = 0x00000001, | |
241 | SATA_DWC_INTPR_NEWFP = 0x00000002, | |
242 | SATA_DWC_INTPR_PMABRT = 0x00000004, | |
243 | SATA_DWC_INTPR_ERR = 0x00000008, | |
244 | SATA_DWC_INTPR_NEWBIST = 0x00000010, | |
245 | SATA_DWC_INTPR_IPF = 0x10000000, | |
246 | SATA_DWC_INTMR_DMATM = 0x00000001, | |
247 | SATA_DWC_INTMR_NEWFPM = 0x00000002, | |
248 | SATA_DWC_INTMR_PMABRTM = 0x00000004, | |
249 | SATA_DWC_INTMR_ERRM = 0x00000008, | |
250 | SATA_DWC_INTMR_NEWBISTM = 0x00000010, | |
251 | SATA_DWC_LLCR_SCRAMEN = 0x00000001, | |
252 | SATA_DWC_LLCR_DESCRAMEN = 0x00000002, | |
253 | SATA_DWC_LLCR_RPDEN = 0x00000004, | |
254 | /* This is all error bits, zero's are reserved fields. */ | |
255 | SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03 | |
256 | }; | |
257 | ||
258 | #define SATA_DWC_SCR0_SPD_GET(v) (((v) >> 4) & 0x0000000F) | |
259 | #define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) |\ | |
260 | SATA_DWC_DMACR_TMOD_TXCHEN) | |
261 | #define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) |\ | |
262 | SATA_DWC_DMACR_TMOD_TXCHEN) | |
263 | #define SATA_DWC_DBTSR_MWR(size) (((size)/4) & SATA_DWC_TXFIFO_DEPTH) | |
264 | #define SATA_DWC_DBTSR_MRD(size) ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\ | |
265 | << 16) | |
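/*
 * Worked example (editor's note, not part of the original file): with the
 * default AHB_DMA_BRST_DFLT of 64 bytes, SATA_DWC_DBTSR_MWR(64) = 0x10 and
 * SATA_DWC_DBTSR_MRD(64) = 0x10 << 16, so the value written to dbtsr in
 * sata_dwc_port_start() below is 0x00100010, i.e. 16-word bursts in each
 * direction.
 */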
266 | struct sata_dwc_device { | |
267 | struct device *dev; /* generic device struct */ | |
268 | struct ata_probe_ent *pe; /* ptr to probe-ent */ | |
269 | struct ata_host *host; | |
270 | u8 *reg_base; | |
271 | struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */ | |
272 | int irq_dma; | |
273 | }; | |
274 | ||
275 | #define SATA_DWC_QCMD_MAX 32 | |
276 | ||
277 | struct sata_dwc_device_port { | |
278 | struct sata_dwc_device *hsdev; | |
279 | int cmd_issued[SATA_DWC_QCMD_MAX]; | |
280 | struct lli *llit[SATA_DWC_QCMD_MAX]; /* DMA LLI table */ | |
281 | dma_addr_t llit_dma[SATA_DWC_QCMD_MAX]; | |
282 | u32 dma_chan[SATA_DWC_QCMD_MAX]; | |
283 | int dma_pending[SATA_DWC_QCMD_MAX]; | |
284 | }; | |
285 | ||
286 | /* | |
287 | * Commonly used DWC SATA driver Macros | |
288 | */ | |
289 | #define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)\ | |
290 | (host)->private_data) | |
291 | #define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)\ | |
292 | (ap)->host->private_data) | |
293 | #define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)\ | |
294 | (ap)->private_data) | |
295 | #define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)\ | |
296 | (qc)->ap->host->private_data) | |
297 | #define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)\ |
298 | 				(p)->hsdev) |
299 | ||
300 | enum { | |
301 | SATA_DWC_CMD_ISSUED_NOT = 0, | |
302 | SATA_DWC_CMD_ISSUED_PEND = 1, | |
303 | SATA_DWC_CMD_ISSUED_EXEC = 2, | |
304 | SATA_DWC_CMD_ISSUED_NODATA = 3, | |
305 | ||
306 | SATA_DWC_DMA_PENDING_NONE = 0, | |
307 | SATA_DWC_DMA_PENDING_TX = 1, | |
308 | SATA_DWC_DMA_PENDING_RX = 2, | |
309 | }; | |
310 | ||
311 | struct sata_dwc_host_priv { | |
312 | void __iomem *scr_addr_sstatus; | |
313 | u32 sata_dwc_sactive_issued; |
314 | u32 sata_dwc_sactive_queued; |
315 | u32 dma_interrupt_count; | |
316 | struct ahb_dma_regs *sata_dma_regs; | |
317 | struct device *dwc_dev; | |
318 | }; | |
319 | struct sata_dwc_host_priv host_pvt; | |
320 | /* | |
321 | * Prototypes | |
322 | */ | |
323 | static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag); | |
324 | static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, | |
325 | u32 check_status); | |
326 | static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status); | |
327 | static void sata_dwc_port_stop(struct ata_port *ap); | |
328 | static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag); | |
329 | static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq); | |
330 | static void dma_dwc_exit(struct sata_dwc_device *hsdev); | |
331 | static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems, | |
332 | struct lli *lli, dma_addr_t dma_lli, | |
333 | void __iomem *addr, int dir); | |
334 | static void dma_dwc_xfer_start(int dma_ch); | |
335 | ||
336 | static void sata_dwc_tf_dump(struct ata_taskfile *tf) | |
337 | { | |
338 | dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:" | |
339 | "0x%lx device: %x\n", tf->command, ata_get_cmd_descript\ | |
340 | (tf->protocol), tf->flags, tf->device); | |
341 | dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x " | |
342 | "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal, | |
343 | tf->lbam, tf->lbah); | |
344 | dev_vdbg(host_pvt.dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x " | |
345 | "hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n", | |
346 | tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, | |
347 | tf->hob_lbah); | |
348 | } | |
349 | ||
350 | /* | |
351 | * Function: get_burst_length_encode | |
352 | * arguments: datalength: length in bytes of data | |
353 | * returns value to be programmed in register corresponding to data length |
354 | * This value is effectively the log(base 2) of the length | |
355 | */ | |
356 | static int get_burst_length_encode(int datalength) | |
357 | { | |
358 | int items = datalength >> 2; /* div by 4 to get lword count */ | |
359 | ||
360 | if (items >= 64) | |
361 | return 5; | |
362 | ||
363 | if (items >= 32) | |
364 | return 4; | |
365 | ||
366 | if (items >= 16) | |
367 | return 3; | |
368 | ||
369 | if (items >= 8) | |
370 | return 2; | |
371 | ||
372 | if (items >= 4) | |
373 | return 1; | |
374 | ||
375 | return 0; | |
376 | } | |
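/*
 * Worked example (editor's note, not part of the original file): for the
 * default AHB_DMA_BRST_DFLT of 64 bytes the function sees 64 >> 2 = 16 data
 * items and returns 3, the value later passed to DMA_CTL_SRC_MSIZE() and
 * DMA_CTL_DST_MSIZE() in map_sg_to_lli().  In general the encoding is
 * log2(items) - 1 for 4 or more items (4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4,
 * 64 or more -> 5) and 0 below that.
 */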
377 | ||
378 | static void clear_chan_interrupts(int c) | |
379 | { | |
380 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.tfr.low), | |
381 | DMA_CHANNEL(c)); | |
382 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.block.low), | |
383 | DMA_CHANNEL(c)); | |
384 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.srctran.low), | |
385 | DMA_CHANNEL(c)); | |
386 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.dsttran.low), | |
387 | DMA_CHANNEL(c)); | |
388 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.error.low), | |
389 | DMA_CHANNEL(c)); | |
390 | } | |
391 | ||
392 | /* | |
393 | * Function: dma_request_channel | |
394 | * arguments: None | |
395 | * returns channel number if available else -1 | |
396 | * This function assigns the next available DMA channel from the list to the | |
397 | * requester | |
398 | */ | |
399 | static int dma_request_channel(void) | |
400 | { | |
401 | int i; | |
402 | ||
403 | for (i = 0; i < DMA_NUM_CHANS; i++) { | |
404 | if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) &\ | |
405 | DMA_CHANNEL(i))) | |
406 | return i; | |
407 | } | |
408 | dev_err(host_pvt.dwc_dev, "%s NO channel chan_en: 0x%08x\n", __func__, | |
409 | in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low))); | |
410 | return -1; | |
411 | } | |
412 | ||
413 | /* | |
414 | * Function: dma_dwc_interrupt | |
415 | * arguments: irq, hsdev_instance |
416 | * returns IRQ_HANDLED after servicing the DMA channel interrupts |
417 | * Interrupt Handler for DW AHB SATA DMA | |
418 | */ | |
419 | static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance) | |
420 | { | |
421 | int chan; | |
422 | u32 tfr_reg, err_reg; | |
423 | unsigned long flags; | |
424 | struct sata_dwc_device *hsdev = | |
425 | (struct sata_dwc_device *)hsdev_instance; | |
426 | struct ata_host *host = (struct ata_host *)hsdev->host; | |
427 | struct ata_port *ap; | |
428 | struct sata_dwc_device_port *hsdevp; | |
429 | u8 tag = 0; | |
430 | unsigned int port = 0; | |
431 | ||
432 | spin_lock_irqsave(&host->lock, flags); | |
433 | ap = host->ports[port]; | |
434 | hsdevp = HSDEVP_FROM_AP(ap); | |
435 | tag = ap->link.active_tag; | |
436 | ||
437 | tfr_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.tfr\ | |
438 | .low)); | |
439 | err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error\ | |
440 | .low)); | |
441 | ||
442 | dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n", | |
443 | tfr_reg, err_reg, hsdevp->dma_pending[tag], port); | |
444 | ||
445 | for (chan = 0; chan < DMA_NUM_CHANS; chan++) { | |
446 | /* Check for end-of-transfer interrupt. */ | |
447 | if (tfr_reg & DMA_CHANNEL(chan)) { | |
448 | /* | |
449 | * Each DMA command produces 2 interrupts. Only | |
450 | * complete the command after both interrupts have been | |
451 | * seen. (See sata_dwc_isr()) | |
452 | */ | |
453 | host_pvt.dma_interrupt_count++; | |
454 | sata_dwc_clear_dmacr(hsdevp, tag); | |
455 | ||
456 | if (hsdevp->dma_pending[tag] == | |
457 | SATA_DWC_DMA_PENDING_NONE) { | |
458 | dev_err(ap->dev, "DMA not pending eot=0x%08x " | |
459 | "err=0x%08x tag=0x%02x pending=%d\n", | |
460 | tfr_reg, err_reg, tag, | |
461 | hsdevp->dma_pending[tag]); | |
462 | } | |
463 | ||
464 | if ((host_pvt.dma_interrupt_count % 2) == 0) | |
465 | sata_dwc_dma_xfer_complete(ap, 1); | |
466 | ||
467 | /* Clear the interrupt */ | |
468 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\ | |
469 | .tfr.low), | |
470 | DMA_CHANNEL(chan)); | |
471 | } | |
472 | ||
473 | /* Check for error interrupt. */ | |
474 | if (err_reg & DMA_CHANNEL(chan)) { | |
475 | /* TODO Need error handler ! */ | |
476 | dev_err(ap->dev, "error interrupt err_reg=0x%08x\n", | |
477 | err_reg); | |
478 | ||
479 | /* Clear the interrupt. */ | |
480 | out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\ | |
481 | .error.low), | |
482 | DMA_CHANNEL(chan)); | |
483 | } | |
484 | } | |
485 | spin_unlock_irqrestore(&host->lock, flags); | |
486 | return IRQ_HANDLED; | |
487 | } | |
488 | ||
489 | /* | |
490 | * Function: dma_request_interrupts | |
491 | * arguments: hsdev | |
492 | * returns status | |
493 | * This function registers ISR for a particular DMA channel interrupt | |
494 | */ | |
495 | static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq) | |
496 | { | |
497 | int retval = 0; | |
498 | int chan; | |
499 | ||
500 | for (chan = 0; chan < DMA_NUM_CHANS; chan++) { | |
501 | /* Unmask error interrupt */ | |
502 | out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low, | |
503 | DMA_ENABLE_CHAN(chan)); | |
504 | ||
505 | /* Unmask end-of-transfer interrupt */ | |
506 | out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.tfr.low, | |
507 | DMA_ENABLE_CHAN(chan)); | |
508 | } | |
509 | ||
510 | retval = request_irq(irq, dma_dwc_interrupt, 0, "SATA DMA", hsdev); | |
511 | if (retval) { | |
512 | dev_err(host_pvt.dwc_dev, "%s: could not get IRQ %d\n", | |
513 | __func__, irq); | |
514 | return -ENODEV; | |
515 | } | |
516 | ||
517 | /* Mark this interrupt as requested */ | |
518 | hsdev->irq_dma = irq; | |
519 | return 0; | |
520 | } | |
521 | ||
522 | /* | |
523 | * Function: map_sg_to_lli | |
524 | * The Synopsys driver has a comment proposing that better performance |
525 | * is possible by only enabling interrupts on the last item in the linked list. | |
526 | * However, it seems that could be a problem if an error happened on one of the | |
527 | * first items. The transfer would halt, but no error interrupt would occur. | |
528 | * Currently this function sets interrupts enabled for each linked list item: | |
529 | * DMA_CTL_INT_EN. | |
530 | */ | |
531 | static int map_sg_to_lli(struct scatterlist *sg, int num_elems, | |
532 | struct lli *lli, dma_addr_t dma_lli, | |
533 | void __iomem *dmadr_addr, int dir) | |
534 | { | |
535 | int i, idx = 0; | |
536 | int fis_len = 0; | |
537 | dma_addr_t next_llp; | |
538 | int bl; | |
539 | ||
540 | dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x" | |
541 | " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli, | |
542 | (u32)dmadr_addr); | |
543 | ||
544 | bl = get_burst_length_encode(AHB_DMA_BRST_DFLT); | |
545 | ||
546 | for (i = 0; i < num_elems; i++, sg++) { | |
547 | u32 addr, offset; | |
548 | u32 sg_len, len; | |
549 | ||
550 | addr = (u32) sg_dma_address(sg); | |
551 | sg_len = sg_dma_len(sg); | |
552 | ||
553 | dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len" | |
554 | "=%d\n", __func__, i, addr, sg_len); | |
555 | ||
556 | while (sg_len) { | |
557 | if (idx >= SATA_DWC_DMAC_LLI_NUM) { | |
558 | /* The LLI table is not large enough. */ | |
559 | dev_err(host_pvt.dwc_dev, "LLI table overrun " | |
560 | "(idx=%d)\n", idx); | |
561 | break; | |
562 | } | |
563 | len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ? | |
564 | SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len; | |
565 | ||
566 | offset = addr & 0xffff; | |
567 | if ((offset + sg_len) > 0x10000) | |
568 | len = 0x10000 - offset; | |
569 | ||
570 | /* | |
571 | * Make sure a LLI block is not created that will span | |
572 | * 8K max FIS boundary. If the block spans such a FIS | |
573 | * boundary, there is a chance that a DMA burst will | |
574 | * cross that boundary -- this results in an error in | |
575 | * the host controller. | |
576 | */ | |
577 | if (fis_len + len > 8192) { | |
578 | dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len=" | |
579 | "%d(0x%x) len=%d(0x%x)\n", fis_len, | |
580 | fis_len, len, len); | |
581 | len = 8192 - fis_len; | |
582 | fis_len = 0; | |
583 | } else { | |
584 | fis_len += len; | |
585 | } | |
586 | if (fis_len == 8192) | |
587 | fis_len = 0; | |
588 | ||
589 | /* | |
590 | * Set DMA addresses and lower half of control register | |
591 | * based on direction. | |
592 | */ | |
593 | if (dir == DMA_FROM_DEVICE) { | |
594 | lli[idx].dar = cpu_to_le32(addr); | |
595 | lli[idx].sar = cpu_to_le32((u32)dmadr_addr); | |
596 | ||
597 | lli[idx].ctl.low = cpu_to_le32( | |
598 | DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) | | |
599 | DMA_CTL_SMS(0) | | |
600 | DMA_CTL_DMS(1) | | |
601 | DMA_CTL_SRC_MSIZE(bl) | | |
602 | DMA_CTL_DST_MSIZE(bl) | | |
603 | DMA_CTL_SINC_NOCHANGE | | |
604 | DMA_CTL_SRC_TRWID(2) | | |
605 | DMA_CTL_DST_TRWID(2) | | |
606 | DMA_CTL_INT_EN | | |
607 | DMA_CTL_LLP_SRCEN | | |
608 | DMA_CTL_LLP_DSTEN); | |
609 | } else { /* DMA_TO_DEVICE */ | |
610 | lli[idx].sar = cpu_to_le32(addr); | |
611 | lli[idx].dar = cpu_to_le32((u32)dmadr_addr); | |
612 | ||
613 | lli[idx].ctl.low = cpu_to_le32( | |
614 | DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) | | |
615 | DMA_CTL_SMS(1) | | |
616 | DMA_CTL_DMS(0) | | |
617 | DMA_CTL_SRC_MSIZE(bl) | | |
618 | DMA_CTL_DST_MSIZE(bl) | | |
619 | DMA_CTL_DINC_NOCHANGE | | |
620 | DMA_CTL_SRC_TRWID(2) | | |
621 | DMA_CTL_DST_TRWID(2) | | |
622 | DMA_CTL_INT_EN | | |
623 | DMA_CTL_LLP_SRCEN | | |
624 | DMA_CTL_LLP_DSTEN); | |
625 | } | |
626 | ||
627 | dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: " | |
628 | "0x%08x val: 0x%08x\n", __func__, | |
629 | len, DMA_CTL_BLK_TS(len / 4)); | |
630 | ||
631 | /* Program the LLI CTL high register */ | |
632 | lli[idx].ctl.high = cpu_to_le32(DMA_CTL_BLK_TS\ | |
633 | (len / 4)); | |
634 | ||
635 | /* Program the next pointer. The next pointer must be | |
636 | * the physical address, not the virtual address. | |
637 | */ | |
638 | next_llp = (dma_lli + ((idx + 1) * sizeof(struct \ | |
639 | lli))); | |
640 | ||
641 | /* The last 2 bits encode the list master select. */ | |
642 | next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2); | |
643 | ||
644 | lli[idx].llp = cpu_to_le32(next_llp); | |
645 | idx++; | |
646 | sg_len -= len; | |
647 | addr += len; | |
648 | } | |
649 | } | |
650 | ||
651 | /* | |
652 | * The last next ptr has to be zero and the last control low register | |
653 | * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source | |
654 | * and destination enable) set back to 0 (disabled.) This is what tells | |
655 | * the core that this is the last item in the linked list. | |
656 | */ | |
657 | if (idx) { | |
658 | lli[idx-1].llp = 0x00000000; | |
659 | lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32; | |
660 | ||
661 | /* Flush cache to memory */ | |
662 | dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx), | |
663 | DMA_BIDIRECTIONAL); | |
664 | } | |
665 | ||
666 | return idx; | |
667 | } | |
668 | ||
669 | /* | |
670 | * Function: dma_dwc_xfer_start | |
671 | * arguments: Channel number | |
672 | * Return : None | |
673 | * Enables the DMA channel | |
674 | */ | |
675 | static void dma_dwc_xfer_start(int dma_ch) | |
676 | { | |
677 | /* Enable the DMA channel */ | |
678 | out_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low), | |
679 | in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) | | |
680 | DMA_ENABLE_CHAN(dma_ch)); | |
681 | } | |
682 | ||
683 | static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems, | |
684 | struct lli *lli, dma_addr_t dma_lli, | |
685 | void __iomem *addr, int dir) | |
686 | { | |
687 | int dma_ch; | |
688 | int num_lli; | |
689 | /* Acquire DMA channel */ | |
690 | dma_ch = dma_request_channel(); | |
691 | if (dma_ch == -1) { | |
692 | dev_err(host_pvt.dwc_dev, "%s: dma channel unavailable\n", | |
693 | __func__); | |
694 | return -EAGAIN; | |
695 | } | |
696 | ||
697 | /* Convert SG list to linked list of items (LLIs) for AHB DMA */ | |
698 | num_lli = map_sg_to_lli(sg, num_elems, lli, dma_lli, addr, dir); | |
699 | ||
700 | dev_dbg(host_pvt.dwc_dev, "%s sg: 0x%p, count: %d lli: %p dma_lli:" | |
701 | " 0x%0xlx addr: %p lli count: %d\n", __func__, sg, num_elems, | |
702 | lli, (u32)dma_lli, addr, num_lli); | |
703 | ||
704 | clear_chan_interrupts(dma_ch); | |
705 | ||
706 | /* Program the CFG register. */ | |
707 | out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high), | |
708 | DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ); | |
709 | out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low), 0); | |
710 | ||
711 | /* Program the address of the linked list */ | |
712 | out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low), | |
713 | DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2)); | |
714 | ||
715 | /* Program the CTL register with src enable / dst enable */ | |
716 | out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low), | |
717 | DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN); | |
718 | return 0; | |
719 | } | |
720 | ||
721 | /* | |
722 | * Function: dma_dwc_exit | |
723 | * arguments: None | |
724 | * returns status | |
725 | * This function exits the SATA DMA driver | |
726 | */ | |
727 | static void dma_dwc_exit(struct sata_dwc_device *hsdev) | |
728 | { | |
729 | dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__); | |
730 | if (host_pvt.sata_dma_regs) | |
731 | iounmap(host_pvt.sata_dma_regs); | |
732 | ||
733 | if (hsdev->irq_dma) | |
734 | free_irq(hsdev->irq_dma, hsdev); | |
735 | } | |
736 | ||
737 | /* | |
738 | * Function: dma_dwc_init | |
739 | * arguments: hsdev | |
740 | * returns status | |
741 | * This function initializes the SATA DMA driver | |
742 | */ | |
743 | static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq) | |
744 | { | |
745 | int err; | |
746 | ||
747 | err = dma_request_interrupts(hsdev, irq); | |
748 | if (err) { | |
749 | dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns" | |
750 | " %d\n", __func__, err); | |
751 | goto error_out; | |
752 | } | |
753 | ||
754 | /* Enable DMA */ |
755 | out_le32(&(host_pvt.sata_dma_regs->dma_cfg.low), DMA_EN); | |
756 | ||
757 | dev_notice(host_pvt.dwc_dev, "DMA initialized\n"); | |
758 | dev_dbg(host_pvt.dwc_dev, "SATA DMA registers=0x%p\n", host_pvt.\ | |
759 | sata_dma_regs); | |
760 | ||
761 | return 0; | |
762 | ||
763 | error_out: | |
764 | dma_dwc_exit(hsdev); | |
765 | ||
766 | return err; | |
767 | } | |
768 | ||
769 | static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) | |
770 | { | |
771 | if (scr > SCR_NOTIFICATION) { | |
772 | dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", | |
773 | __func__, scr); | |
774 | return -EINVAL; | |
775 | } | |
776 | ||
777 | *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4)); | |
778 | dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", |
779 | __func__, link->ap->print_id, scr, *val); | |
780 | ||
781 | return 0; | |
782 | } | |
783 | ||
784 | static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val) | |
785 | { | |
786 | dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", |
787 | __func__, link->ap->print_id, scr, val); | |
788 | if (scr > SCR_NOTIFICATION) { | |
789 | dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", | |
790 | __func__, scr); | |
791 | return -EINVAL; | |
792 | } | |
793 | out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val); | |
794 | ||
795 | return 0; | |
796 | } | |
797 | ||
798 | static u32 core_scr_read(unsigned int scr) | |
799 | { | |
800 | return in_le32((void __iomem *)(host_pvt.scr_addr_sstatus) +\ | |
801 | (scr * 4)); | |
802 | } | |
803 | ||
804 | static void core_scr_write(unsigned int scr, u32 val) | |
805 | { | |
806 | out_le32((void __iomem *)(host_pvt.scr_addr_sstatus) + (scr * 4), | |
807 | val); | |
808 | } | |
809 | ||
810 | static void clear_serror(void) | |
811 | { | |
812 | u32 val; | |
813 | val = core_scr_read(SCR_ERROR); | |
814 | core_scr_write(SCR_ERROR, val); | |
815 | ||
816 | } | |
817 | ||
818 | static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit) | |
819 | { | |
820 | out_le32(&hsdev->sata_dwc_regs->intpr, | |
821 | in_le32(&hsdev->sata_dwc_regs->intpr)); | |
822 | } | |
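/*
 * Editor's note: as written, clear_interrupt_bit() ignores its 'bit' argument
 * and writes the current contents of intpr back to the register; assuming the
 * pending register is write-one-to-clear (as its use here suggests), this
 * clears every interrupt pending at that moment, not only 'bit'.
 */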
823 | ||
824 | static u32 qcmd_tag_to_mask(u8 tag) | |
825 | { | |
826 | return 0x00000001 << (tag & 0x1f); | |
827 | } | |
828 | ||
829 | /* See ahci.c */ | |
830 | static void sata_dwc_error_intr(struct ata_port *ap, | |
831 | struct sata_dwc_device *hsdev, uint intpr) | |
832 | { | |
833 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); | |
834 | struct ata_eh_info *ehi = &ap->link.eh_info; | |
835 | unsigned int err_mask = 0, action = 0; | |
836 | struct ata_queued_cmd *qc; | |
837 | u32 serror; | |
838 | u8 status, tag; | |
839 | u32 err_reg; | |
840 | ||
841 | ata_ehi_clear_desc(ehi); | |
842 | ||
843 | serror = core_scr_read(SCR_ERROR); | |
844 | status = ap->ops->sff_check_status(ap); | |
845 | ||
846 | err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error.\ | |
847 | low)); | |
848 | tag = ap->link.active_tag; | |
849 | ||
850 | dev_err(ap->dev, "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x " | |
851 | "dma_intp=%d pending=%d issued=%d dma_err_status=0x%08x\n", | |
852 | __func__, serror, intpr, status, host_pvt.dma_interrupt_count, | |
853 | hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag], err_reg); | |
854 | ||
855 | /* Clear error register and interrupt bit */ | |
856 | clear_serror(); | |
857 | clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR); | |
858 | ||
859 | /* This is the only error happening now. TODO check for exact error */ | |
860 | ||
861 | err_mask |= AC_ERR_HOST_BUS; | |
862 | action |= ATA_EH_RESET; | |
863 | ||
864 | /* Pass this on to EH */ | |
865 | ehi->serror |= serror; | |
866 | ehi->action |= action; | |
867 | ||
868 | qc = ata_qc_from_tag(ap, tag); | |
869 | if (qc) | |
870 | qc->err_mask |= err_mask; | |
871 | else | |
872 | ehi->err_mask |= err_mask; | |
873 | ||
874 | ata_port_abort(ap); | |
875 | } | |
876 | ||
877 | /* | |
878 | * Function : sata_dwc_isr | |
879 | * arguments : irq, void *dev_instance |
880 | * Return value : irqreturn_t - status of IRQ |
881 | * This interrupt handler is called via the port ops registered function: |
882 | * .irq_handler = sata_dwc_isr | |
883 | */ | |
884 | static irqreturn_t sata_dwc_isr(int irq, void *dev_instance) | |
885 | { | |
886 | struct ata_host *host = (struct ata_host *)dev_instance; | |
887 | struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host); | |
888 | struct ata_port *ap; | |
889 | struct ata_queued_cmd *qc; | |
890 | unsigned long flags; | |
891 | u8 status, tag; | |
892 | int handled, num_processed, port = 0; | |
893 | uint intpr, sactive, sactive2, tag_mask; | |
894 | struct sata_dwc_device_port *hsdevp; | |
895 | host_pvt.sata_dwc_sactive_issued = 0; | |
896 | ||
897 | spin_lock_irqsave(&host->lock, flags); | |
898 | ||
899 | /* Read the interrupt register */ | |
900 | intpr = in_le32(&hsdev->sata_dwc_regs->intpr); | |
901 | ||
902 | ap = host->ports[port]; | |
903 | hsdevp = HSDEVP_FROM_AP(ap); | |
904 | ||
905 | dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr, | |
906 | ap->link.active_tag); | |
907 | ||
908 | /* Check for error interrupt */ | |
909 | if (intpr & SATA_DWC_INTPR_ERR) { | |
910 | sata_dwc_error_intr(ap, hsdev, intpr); | |
911 | handled = 1; | |
912 | goto DONE; | |
913 | } | |
914 | ||
915 | /* Check for DMA SETUP FIS (FP DMA) interrupt */ | |
916 | if (intpr & SATA_DWC_INTPR_NEWFP) { | |
917 | clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP); | |
918 | ||
919 | tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr)); | |
920 | dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag); | |
921 | if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND) | |
922 | dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag); | |
923 | ||
924 | host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag); | |
925 | ||
926 | qc = ata_qc_from_tag(ap, tag); | |
927 | /* | |
928 | * Start FP DMA for NCQ command. At this point the tag is the | |
929 | * active tag. It is the tag that matches the command about to | |
930 | * be completed. | |
931 | */ | |
932 | qc->ap->link.active_tag = tag; | |
933 | sata_dwc_bmdma_start_by_tag(qc, tag); | |
934 | ||
935 | handled = 1; | |
936 | goto DONE; | |
937 | } | |
938 | sactive = core_scr_read(SCR_ACTIVE); | |
939 | tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive; | |
940 | ||
941 | /* If no sactive issued and tag_mask is zero then this is not NCQ */ | |
942 | if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) { | |
943 | if (ap->link.active_tag == ATA_TAG_POISON) | |
944 | tag = 0; | |
945 | else | |
946 | tag = ap->link.active_tag; | |
947 | qc = ata_qc_from_tag(ap, tag); | |
948 | ||
949 | /* DEV interrupt w/ no active qc? */ | |
950 | if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { | |
951 | dev_err(ap->dev, "%s interrupt with no active qc " | |
952 | "qc=%p\n", __func__, qc); | |
953 | ap->ops->sff_check_status(ap); | |
954 | handled = 1; | |
955 | goto DONE; | |
956 | } | |
957 | status = ap->ops->sff_check_status(ap); | |
958 | ||
959 | qc->ap->link.active_tag = tag; | |
960 | hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; | |
961 | ||
962 | if (status & ATA_ERR) { | |
963 | dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status); | |
964 | sata_dwc_qc_complete(ap, qc, 1); | |
965 | handled = 1; | |
966 | goto DONE; | |
967 | } | |
968 | ||
969 | dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n", | |
970 | __func__, ata_get_cmd_descript(qc->tf.protocol)); | |
971 | DRVSTILLBUSY: | |
972 | if (ata_is_dma(qc->tf.protocol)) { | |
973 | /* | |
974 | * Each DMA transaction produces 2 interrupts. The DMAC | |
975 | * transfer complete interrupt and the SATA controller | |
976 | * operation done interrupt. The command should be | |
977 | * completed only after both interrupts are seen. | |
978 | */ | |
979 | host_pvt.dma_interrupt_count++; | |
980 | if (hsdevp->dma_pending[tag] == \ | |
981 | SATA_DWC_DMA_PENDING_NONE) { | |
982 | dev_err(ap->dev, "%s: DMA not pending " | |
983 | "intpr=0x%08x status=0x%08x pending" | |
984 | "=%d\n", __func__, intpr, status, | |
985 | hsdevp->dma_pending[tag]); | |
986 | } | |
987 | ||
988 | if ((host_pvt.dma_interrupt_count % 2) == 0) | |
989 | sata_dwc_dma_xfer_complete(ap, 1); | |
990 | } else if (ata_is_pio(qc->tf.protocol)) { | |
991 | ata_sff_hsm_move(ap, qc, status, 0); | |
992 | handled = 1; | |
993 | goto DONE; | |
994 | } else { | |
995 | if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) | |
996 | goto DRVSTILLBUSY; | |
997 | } | |
998 | ||
999 | handled = 1; | |
1000 | goto DONE; | |
1001 | } | |
1002 | ||
1003 | /* | |
1004 | * This is a NCQ command. At this point we need to figure out for which | |
1005 | * tags we have gotten a completion interrupt. One interrupt may serve | |
1006 | * as completion for more than one operation when commands are queued | |
1007 | * (NCQ). We need to process each completed command. | |
1008 | */ | |
1009 | ||
1010 | /* process completed commands */ | |
1011 | sactive = core_scr_read(SCR_ACTIVE); | |
1012 | tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive; | |
1013 | ||
1014 | if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \ | |
1015 | tag_mask > 1) { | |
1016 | dev_dbg(ap->dev, "%s NCQ:sactive=0x%08x sactive_issued=0x%08x" | |
1017 | "tag_mask=0x%08x\n", __func__, sactive, | |
1018 | host_pvt.sata_dwc_sactive_issued, tag_mask); | |
1019 | } | |
1020 | ||
1021 | if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \ | |
1022 | (host_pvt.sata_dwc_sactive_issued)) { | |
1023 | dev_warn(ap->dev, "Bad tag mask? sactive=0x%08x " | |
1024 | "(host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask" | |
1025 | "=0x%08x\n", sactive, host_pvt.sata_dwc_sactive_issued, | |
1026 | tag_mask); | |
1027 | } | |
1028 | ||
1029 | /* read just to clear ... not bad if currently still busy */ | |
1030 | status = ap->ops->sff_check_status(ap); | |
1031 | dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status); | |
1032 | ||
1033 | tag = 0; | |
1034 | num_processed = 0; | |
1035 | while (tag_mask) { | |
1036 | num_processed++; | |
1037 | while (!(tag_mask & 0x00000001)) { | |
1038 | tag++; | |
1039 | tag_mask <<= 1; | |
1040 | } | |
1041 | ||
1042 | tag_mask &= (~0x00000001); | |
1043 | qc = ata_qc_from_tag(ap, tag); | |
1044 | ||
1045 | /* To be picked up by completion functions */ | |
1046 | qc->ap->link.active_tag = tag; | |
1047 | hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; | |
1048 | ||
1049 | /* Let libata/scsi layers handle error */ | |
1050 | if (status & ATA_ERR) { | |
1051 | dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__, | |
1052 | status); | |
1053 | sata_dwc_qc_complete(ap, qc, 1); | |
1054 | handled = 1; | |
1055 | goto DONE; | |
1056 | } | |
1057 | ||
1058 | /* Process completed command */ | |
1059 | dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__, | |
1060 | ata_get_cmd_descript(qc->tf.protocol)); | |
1061 | if (ata_is_dma(qc->tf.protocol)) { | |
1062 | host_pvt.dma_interrupt_count++; | |
1063 | if (hsdevp->dma_pending[tag] == \ | |
1064 | SATA_DWC_DMA_PENDING_NONE) | |
1065 | dev_warn(ap->dev, "%s: DMA not pending?\n", | |
1066 | __func__); | |
1067 | if ((host_pvt.dma_interrupt_count % 2) == 0) | |
1068 | sata_dwc_dma_xfer_complete(ap, 1); | |
1069 | } else { | |
1070 | if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) | |
1071 | goto STILLBUSY; | |
1072 | } | |
1073 | continue; | |
1074 | ||
1075 | STILLBUSY: | |
1076 | ap->stats.idle_irq++; | |
1077 | dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n", | |
1078 | ap->print_id); | |
1079 | } /* while tag_mask */ | |
1080 | ||
1081 | /* | |
1082 | * Check to see if any commands completed while we were processing our | |
1083 | * initial set of completed commands (read status clears interrupts, | |
1084 | * so we might miss a completed command interrupt if one came in while | |
1085 | * we were processing --we read status as part of processing a completed | |
1086 | * command). | |
1087 | */ | |
1088 | sactive2 = core_scr_read(SCR_ACTIVE); | |
1089 | if (sactive2 != sactive) { | |
1090 | dev_dbg(ap->dev, "More completed - sactive=0x%x sactive2" | |
1091 | "=0x%x\n", sactive, sactive2); | |
1092 | } | |
1093 | handled = 1; | |
1094 | ||
1095 | DONE: | |
1096 | spin_unlock_irqrestore(&host->lock, flags); | |
1097 | return IRQ_RETVAL(handled); | |
1098 | } | |
1099 | ||
1100 | static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag) | |
1101 | { | |
1102 | struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp); | |
1103 | ||
1104 | if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) { | |
1105 | out_le32(&(hsdev->sata_dwc_regs->dmacr), | |
1106 | SATA_DWC_DMACR_RX_CLEAR( | |
1107 | in_le32(&(hsdev->sata_dwc_regs->dmacr)))); | |
1108 | } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) { | |
1109 | out_le32(&(hsdev->sata_dwc_regs->dmacr), | |
1110 | SATA_DWC_DMACR_TX_CLEAR( | |
1111 | in_le32(&(hsdev->sata_dwc_regs->dmacr)))); | |
1112 | } else { | |
1113 | /* | |
1114 | * This should not happen, it indicates the driver is out of | |
1115 | * sync. If it does happen, clear dmacr anyway. | |
1116 | */ | |
1117 | dev_err(host_pvt.dwc_dev, "%s DMA protocol RX and " |
1118 | "TX DMA not pending tag=0x%02x pending=%d" | |
1119 | " dmacr: 0x%08x\n", __func__, tag, | |
1120 | hsdevp->dma_pending[tag], | |
1121 | in_le32(&(hsdev->sata_dwc_regs->dmacr))); | |
1122 | out_le32(&(hsdev->sata_dwc_regs->dmacr), | |
1123 | SATA_DWC_DMACR_TXRXCH_CLEAR); | |
1124 | } | |
1125 | } | |
1126 | ||
1127 | static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) | |
1128 | { | |
1129 | struct ata_queued_cmd *qc; | |
1130 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); | |
1131 | struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); | |
1132 | u8 tag = 0; | |
1133 | ||
1134 | tag = ap->link.active_tag; | |
1135 | qc = ata_qc_from_tag(ap, tag); | |
1136 | if (!qc) { | |
1137 | dev_err(ap->dev, "failed to get qc"); | |
1138 | return; | |
1139 | } | |
1140 | ||
1141 | #ifdef DEBUG_NCQ | |
1142 | if (tag > 0) { | |
1143 | dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s " | |
1144 | "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command, | |
1145 | ata_get_cmd_descript(qc->dma_dir), | |
1146 | ata_get_cmd_descript(qc->tf.protocol), | |
1147 | in_le32(&(hsdev->sata_dwc_regs->dmacr))); | |
1148 | } | |
1149 | #endif | |
1150 | ||
1151 | if (ata_is_dma(qc->tf.protocol)) { | |
1152 | if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) { | |
1153 | dev_err(ap->dev, "%s DMA protocol RX and TX DMA not " | |
1154 | "pending dmacr: 0x%08x\n", __func__, | |
1155 | in_le32(&(hsdev->sata_dwc_regs->dmacr))); | |
1156 | } | |
1157 | ||
1158 | hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE; | |
1159 | sata_dwc_qc_complete(ap, qc, check_status); | |
1160 | ap->link.active_tag = ATA_TAG_POISON; | |
1161 | } else { | |
1162 | sata_dwc_qc_complete(ap, qc, check_status); | |
1163 | } | |
1164 | } | |
1165 | ||
1166 | static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, | |
1167 | u32 check_status) | |
1168 | { | |
1169 | u8 status = 0; | |
1170 | u32 mask = 0x0; | |
1171 | u8 tag = qc->tag; | |
1172 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); | |
1173 | host_pvt.sata_dwc_sactive_queued = 0; | |
1174 | dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status); | |
1175 | ||
1176 | if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) | |
1177 | dev_err(ap->dev, "TX DMA PENDING\n"); | |
1178 | else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) | |
1179 | dev_err(ap->dev, "RX DMA PENDING\n"); | |
1180 | dev_dbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u:" | |
1181 | " protocol=%d\n", qc->tf.command, status, ap->print_id, | |
1182 | qc->tf.protocol); | |
1183 | ||
1184 | /* clear active bit */ | |
1185 | mask = (~(qcmd_tag_to_mask(tag))); | |
1186 | host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \ | |
1187 | & mask; | |
1188 | host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \ | |
1189 | & mask; | |
1190 | ata_qc_complete(qc); | |
1191 | return 0; | |
1192 | } | |
1193 | ||
1194 | static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev) | |
1195 | { | |
1196 | /* Enable selective interrupts by setting the interrupt mask register */ |
1197 | out_le32(&hsdev->sata_dwc_regs->intmr, | |
1198 | SATA_DWC_INTMR_ERRM | | |
1199 | SATA_DWC_INTMR_NEWFPM | | |
1200 | SATA_DWC_INTMR_PMABRTM | | |
1201 | SATA_DWC_INTMR_DMATM); | |
1202 | /* | |
1203 | * Unmask the error bits that should trigger an error interrupt by | |
1204 | * setting the error mask register. | |
1205 | */ | |
1206 | out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS); | |
1207 | ||
1208 | dev_dbg(host_pvt.dwc_dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", | |
1209 | __func__, in_le32(&hsdev->sata_dwc_regs->intmr), | |
1210 | in_le32(&hsdev->sata_dwc_regs->errmr)); | |
1211 | } | |
1212 | ||
1213 | static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base) | |
1214 | { | |
1215 | port->cmd_addr = (void *)base + 0x00; | |
1216 | port->data_addr = (void *)base + 0x00; | |
1217 | ||
1218 | port->error_addr = (void *)base + 0x04; | |
1219 | port->feature_addr = (void *)base + 0x04; | |
1220 | ||
1221 | port->nsect_addr = (void *)base + 0x08; | |
1222 | ||
1223 | port->lbal_addr = (void *)base + 0x0c; | |
1224 | port->lbam_addr = (void *)base + 0x10; | |
1225 | port->lbah_addr = (void *)base + 0x14; | |
1226 | ||
1227 | port->device_addr = (void *)base + 0x18; | |
1228 | port->command_addr = (void *)base + 0x1c; | |
1229 | port->status_addr = (void *)base + 0x1c; | |
1230 | ||
1231 | port->altstatus_addr = (void *)base + 0x20; | |
1232 | port->ctl_addr = (void *)base + 0x20; | |
1233 | } | |
1234 | ||
1235 | /* | |
1236 | * Function : sata_dwc_port_start | |
1237 | * arguments : struct ata_ioports *port | |
1238 | * Return value : returns 0 if success, error code otherwise | |
1239 | * This function allocates the scatter gather LLI table for AHB DMA | |
1240 | */ | |
1241 | static int sata_dwc_port_start(struct ata_port *ap) | |
1242 | { | |
1243 | int err = 0; | |
1244 | struct sata_dwc_device *hsdev; | |
1245 | struct sata_dwc_device_port *hsdevp = NULL; | |
1246 | struct device *pdev; | |
1247 | int i; | |
1248 | ||
1249 | hsdev = HSDEV_FROM_AP(ap); | |
1250 | ||
1251 | dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no); | |
1252 | ||
1253 | hsdev->host = ap->host; | |
1254 | pdev = ap->host->dev; | |
1255 | if (!pdev) { | |
1256 | dev_err(ap->dev, "%s: no ap->host->dev\n", __func__); | |
1257 | err = -ENODEV; | |
1258 | goto CLEANUP; | |
1259 | } | |
1260 | ||
1261 | /* Allocate Port Struct */ | |
1262 | hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL); | |
1263 | if (!hsdevp) { | |
1264 | dev_err(ap->dev, "%s: kzalloc failed for hsdevp\n", __func__); |
1265 | err = -ENOMEM; | |
1266 | goto CLEANUP; | |
1267 | } | |
1268 | hsdevp->hsdev = hsdev; | |
1269 | ||
1270 | for (i = 0; i < SATA_DWC_QCMD_MAX; i++) | |
1271 | hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; | |
1272 | ||
1273 | ap->bmdma_prd = 0; /* set these so libata doesn't use them */ | |
1274 | ap->bmdma_prd_dma = 0; | |
1275 | ||
1276 | /* | |
1277 | * DMA - Assign scatter gather LLI table. We can't use the libata | |
1278 | * version since its PRD is IDE PCI specific. |
1279 | */ | |
1280 | for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { | |
1281 | hsdevp->llit[i] = dma_alloc_coherent(pdev, | |
1282 | SATA_DWC_DMAC_LLI_TBL_SZ, | |
1283 | &(hsdevp->llit_dma[i]), | |
1284 | GFP_ATOMIC); | |
1285 | if (!hsdevp->llit[i]) { | |
1286 | dev_err(ap->dev, "%s: dma_alloc_coherent failed\n", | |
1287 | __func__); | |
1288 | err = -ENOMEM; | |
1289 | goto CLEANUP; | |
1290 | } | |
1291 | } | |
1292 | ||
1293 | if (ap->port_no == 0) { | |
1294 | dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", | |
1295 | __func__); | |
1296 | out_le32(&hsdev->sata_dwc_regs->dmacr, | |
1297 | SATA_DWC_DMACR_TXRXCH_CLEAR); | |
1298 | ||
1299 | dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n", | |
1300 | __func__); | |
1301 | out_le32(&hsdev->sata_dwc_regs->dbtsr, | |
1302 | (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | | |
1303 | SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT))); | |
1304 | } | |
1305 | ||
1306 | /* Clear any error bits before libata starts issuing commands */ | |
1307 | clear_serror(); | |
1308 | ap->private_data = hsdevp; | |
1309 | ||
1310 | CLEANUP: | |
1311 | if (err) { | |
1312 | sata_dwc_port_stop(ap); | |
1313 | dev_dbg(ap->dev, "%s: fail\n", __func__); | |
1314 | } else { | |
1315 | dev_dbg(ap->dev, "%s: done\n", __func__); | |
1316 | } | |
1317 | ||
1318 | return err; | |
1319 | } | |
1320 | ||
1321 | static void sata_dwc_port_stop(struct ata_port *ap) | |
1322 | { | |
1323 | int i; | |
1324 | struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); | |
1325 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); | |
1326 | ||
1327 | dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id); | |
1328 | ||
1329 | if (hsdevp && hsdev) { | |
1330 | /* deallocate LLI table */ | |
1331 | for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { | |
1332 | dma_free_coherent(ap->host->dev, | |
1333 | SATA_DWC_DMAC_LLI_TBL_SZ, | |
1334 | hsdevp->llit[i], hsdevp->llit_dma[i]); | |
1335 | } | |
1336 | ||
1337 | kfree(hsdevp); | |
1338 | } | |
1339 | ap->private_data = NULL; | |
1340 | } | |
1341 | ||
1342 | /* | |
1343 | * Function : sata_dwc_exec_command_by_tag | |
1344 | * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued | |
1345 | * Return value : None | |
1346 | * This function keeps track of individual command tag ids and calls | |
1347 | * ata_exec_command in libata | |
1348 | */ | |
1349 | static void sata_dwc_exec_command_by_tag(struct ata_port *ap, | |
1350 | struct ata_taskfile *tf, | |
1351 | u8 tag, u32 cmd_issued) | |
1352 | { | |
1353 | unsigned long flags; | |
1354 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); | |
1355 | ||
1356 | dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command, | |
1357 | ata_get_cmd_descript(tf), tag); | |
1358 | ||
1359 | spin_lock_irqsave(&ap->host->lock, flags); | |
1360 | hsdevp->cmd_issued[tag] = cmd_issued; | |
1361 | spin_unlock_irqrestore(&ap->host->lock, flags); | |
1362 | /* | |
1363 | * Clear SError before executing a new command. | |
1364 | * sata_dwc_scr_write and read can not be used here. Clearing the PM | |
1365 | * managed SError register for the disk needs to be done before the | |
1366 | * task file is loaded. | |
1367 | */ | |
1368 | clear_serror(); | |
1369 | ata_sff_exec_command(ap, tf); | |
1370 | } | |
1371 | ||
1372 | static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag) | |
1373 | { | |
1374 | sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag, | |
1375 | SATA_DWC_CMD_ISSUED_PEND); | |
1376 | } | |
1377 | ||
1378 | static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc) | |
1379 | { | |
1380 | u8 tag = qc->tag; | |
1381 | ||
1382 | if (ata_is_ncq(qc->tf.protocol)) { | |
1383 | dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", | |
1384 | __func__, qc->ap->link.sactive, tag); | |
1385 | } else { | |
1386 | tag = 0; | |
1387 | } | |
1388 | sata_dwc_bmdma_setup_by_tag(qc, tag); | |
1389 | } | |
1390 | ||
1391 | static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) | |
1392 | { | |
1393 | int start_dma; | |
1394 | u32 reg, dma_chan; | |
1395 | struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc); | |
1396 | struct ata_port *ap = qc->ap; | |
1397 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); | |
1398 | int dir = qc->dma_dir; | |
1399 | dma_chan = hsdevp->dma_chan[tag]; | |
1400 | ||
1401 | if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) { | |
1402 | start_dma = 1; | |
1403 | if (dir == DMA_TO_DEVICE) | |
1404 | hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX; | |
1405 | else | |
1406 | hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX; | |
1407 | } else { | |
1408 | dev_err(ap->dev, "%s: Command not pending cmd_issued=%d " | |
1409 | "(tag=%d) DMA NOT started\n", __func__, | |
1410 | hsdevp->cmd_issued[tag], tag); | |
1411 | start_dma = 0; | |
1412 | } | |
1413 | ||
1414 | dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s " | |
1415 | "start_dma? %x\n", __func__, qc, tag, qc->tf.command, | |
1416 | ata_get_cmd_descript(qc->dma_dir), start_dma); | |
1417 | sata_dwc_tf_dump(&(qc->tf)); | |
1418 | ||
1419 | if (start_dma) { | |
1420 | reg = core_scr_read(SCR_ERROR); | |
1421 | if (reg & SATA_DWC_SERROR_ERR_BITS) { | |
1422 | dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n", | |
1423 | __func__, reg); | |
1424 | } | |
1425 | ||
1426 | if (dir == DMA_TO_DEVICE) | |
1427 | out_le32(&hsdev->sata_dwc_regs->dmacr, | |
1428 | SATA_DWC_DMACR_TXCHEN); | |
1429 | else | |
1430 | out_le32(&hsdev->sata_dwc_regs->dmacr, | |
1431 | SATA_DWC_DMACR_RXCHEN); | |
1432 | ||
1433 | /* Enable AHB DMA transfer on the specified channel */ | |
1434 | dma_dwc_xfer_start(dma_chan); | |
1435 | } | |
1436 | } | |
1437 | ||
1438 | static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc) | |
1439 | { | |
1440 | u8 tag = qc->tag; | |
1441 | ||
1442 | if (ata_is_ncq(qc->tf.protocol)) { | |
1443 | dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", | |
1444 | __func__, qc->ap->link.sactive, tag); | |
1445 | } else { | |
1446 | tag = 0; | |
1447 | } | |
1448 | dev_dbg(qc->ap->dev, "%s\n", __func__); | |
1449 | sata_dwc_bmdma_start_by_tag(qc, tag); | |
1450 | } | |
1451 | ||
1452 | /* | |
1453 | * Function : sata_dwc_qc_prep_by_tag | |
1454 | * arguments : ata_queued_cmd *qc, u8 tag | |
1455 | * Return value : None | |
1456 | * qc_prep for a particular queued command based on tag | |
1457 | */ | |
1458 | static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag) | |
1459 | { | |
1460 | struct scatterlist *sg = qc->sg; | |
1461 | struct ata_port *ap = qc->ap; | |
1462 | int dma_chan; |
1463 | struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); |
1464 | struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); | |
1465 | int err; | |
1466 | ||
1467 | dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n", | |
1468 | __func__, ap->port_no, ata_get_cmd_descript(qc->dma_dir), | |
1469 | qc->n_elem); | |
1470 | ||
1471 | dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag], | |
1472 | hsdevp->llit_dma[tag], | |
1473 | (void __iomem *)(&hsdev->sata_dwc_regs->\ |
1474 | dmadr), qc->dma_dir); | |
1475 | if (dma_chan < 0) { | |
1476 | dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n", | |
1477 | __func__, dma_chan); |
1478 | return; | |
1479 | } | |
1480 | hsdevp->dma_chan[tag] = dma_chan; | |
1481 | } | |
1482 | ||
1483 | static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) | |
1484 | { | |
1485 | u32 sactive; | |
1486 | u8 tag = qc->tag; | |
1487 | struct ata_port *ap = qc->ap; | |
1488 | ||
1489 | #ifdef DEBUG_NCQ | |
1490 | if (qc->tag > 0 || ap->link.sactive > 1) | |
1491 | dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d " | |
1492 | "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", | |
1493 | __func__, ap->print_id, qc->tf.command, | |
1494 | ata_get_cmd_descript(&qc->tf), | |
1495 | qc->tag, ata_get_cmd_descript(qc->tf.protocol), | |
1496 | ap->link.active_tag, ap->link.sactive); | |
1497 | #endif | |
1498 | ||
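| /* Non-NCQ commands always use the tag 0 descriptor list */ | |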
1499 | if (!ata_is_ncq(qc->tf.protocol)) | |
1500 | tag = 0; | |
1501 | sata_dwc_qc_prep_by_tag(qc, tag); | |
1502 | ||
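| /* | |
|  * For NCQ, mark this tag active in SActive before loading the taskfile | |
|  * and issuing the command; non-NCQ commands go through the standard | |
|  * SFF issue path. | |
|  */ | |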
1503 | if (ata_is_ncq(qc->tf.protocol)) { | |
1504 | sactive = core_scr_read(SCR_ACTIVE); | |
1505 | sactive |= (0x00000001 << tag); | |
1506 | core_scr_write(SCR_ACTIVE, sactive); | |
1507 | ||
1508 | dev_dbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x " | |
1509 | "sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive, | |
1510 | sactive); | |
1511 | ||
1512 | ap->ops->sff_tf_load(ap, &qc->tf); | |
1513 | sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag, | |
1514 | SATA_DWC_CMD_ISSUED_PEND); | |
1515 | } else { | |
1516 | return ata_sff_qc_issue(qc); | |
1517 | } | |
1518 | return 0; | |
1519 | } | |
1520 | ||
1521 | /* | |
1522 | * Function : sata_dwc_qc_prep | |
1523 | * arguments : ata_queued_cmd *qc | |
1524 | * Return value : None | |
1525 | * qc_prep for a particular queued command | |
1526 | */ | |
1527 | ||
1528 | static void sata_dwc_qc_prep(struct ata_queued_cmd *qc) | |
1529 | { | |
1530 | if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO)) | |
1531 | return; | |
1532 | ||
1533 | #ifdef DEBUG_NCQ | |
1534 | if (qc->tag > 0) | |
1535 | dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n", | |
1536 | __func__, qc->tag, qc->ap->link.active_tag); | |
1537 | ||
1538 | return; | |
1539 | #endif | |
1540 | } | |
1541 | ||
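| /* | |
|  * libata error handler: hard reset is avoided on this controller | |
|  * (ATA_LFLAG_NO_HRST); error recovery falls through to the standard | |
|  * SFF handler, which uses softreset. | |
|  */ | |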
1542 | static void sata_dwc_error_handler(struct ata_port *ap) | |
1543 | { | |
1544 | ap->link.flags |= ATA_LFLAG_NO_HRST; | |
1545 | ata_sff_error_handler(ap); | |
1546 | } | |
1547 | ||
1548 | /* | |
1549 | * scsi mid-layer and libata interface structures | |
1550 | */ | |
1551 | static struct scsi_host_template sata_dwc_sht = { | |
1552 | ATA_NCQ_SHT(DRV_NAME), | |
1553 | /* | |
1554 | * test-only: Currently this driver doesn't handle NCQ | |
1555 | * correctly. We enable NCQ but set the queue depth to a | |
1556 | * max of 1. This will get fixed in a future release. | |
1557 | */ | |
1558 | .sg_tablesize = LIBATA_MAX_PRD, | |
1559 | .can_queue = ATA_DEF_QUEUE, /* ATA_MAX_QUEUE */ | |
1560 | .dma_boundary = ATA_DMA_BOUNDARY, | |
1561 | }; | |
1562 | ||
1563 | static struct ata_port_operations sata_dwc_ops = { | |
1564 | .inherits = &ata_sff_port_ops, | |
1565 | ||
1566 | .error_handler = sata_dwc_error_handler, | |
1567 | ||
1568 | .qc_prep = sata_dwc_qc_prep, | |
1569 | .qc_issue = sata_dwc_qc_issue, | |
1570 | ||
1571 | .scr_read = sata_dwc_scr_read, | |
1572 | .scr_write = sata_dwc_scr_write, | |
1573 | ||
1574 | .port_start = sata_dwc_port_start, | |
1575 | .port_stop = sata_dwc_port_stop, | |
1576 | ||
1577 | .bmdma_setup = sata_dwc_bmdma_setup, | |
1578 | .bmdma_start = sata_dwc_bmdma_start, | |
1579 | }; | |
1580 | ||
1581 | static const struct ata_port_info sata_dwc_port_info[] = { | |
1582 | { | |
1583 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | |
1584 | ATA_FLAG_MMIO | ATA_FLAG_NCQ, | |
1585 | .pio_mask = 0x1f, /* pio 0-4 */ | |
1586 | .udma_mask = ATA_UDMA6, | |
1587 | .port_ops = &sata_dwc_ops, | |
1588 | }, | |
1589 | }; | |
1590 | ||
60652d07 | 1591 | static int sata_dwc_probe(struct platform_device *ofdev, |
62936009 RS |
1592 | const struct of_device_id *match) |
1593 | { | |
1594 | struct sata_dwc_device *hsdev; | |
1595 | u32 idr, versionr; | |
1596 | char *ver = (char *)&versionr; | |
1597 | u8 *base = NULL; | |
1598 | int err = 0; | |
1599 | int irq, rc; | |
1600 | struct ata_host *host; | |
1601 | struct ata_port_info pi = sata_dwc_port_info[0]; | |
1602 | const struct ata_port_info *ppi[] = { &pi, NULL }; | |
1603 | ||
1604 | /* Allocate DWC SATA device */ | |
1605 | hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL); | |
1606 | if (hsdev == NULL) { | |
1607 | dev_err(&ofdev->dev, "kzalloc failed for hsdev\n"); | |
1608 | return -ENOMEM; | |
1609 | } | |
1612 | ||
1613 | /* Ioremap SATA registers */ | |
1614 | base = of_iomap(ofdev->dev.of_node, 0); | |
1615 | if (!base) { | |
1616 | dev_err(&ofdev->dev, "ioremap failed for SATA register" | |
1617 | " address\n"); | |
1618 | err = -ENODEV; | |
1619 | goto error_out; | |
1620 | } | |
1621 | hsdev->reg_base = base; | |
1622 | dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n"); | |
1623 | ||
1624 | /* Synopsys DWC SATA specific Registers */ | |
1625 | hsdev->sata_dwc_regs = (void __iomem *)(base + SATA_DWC_REG_OFFSET); | |
1626 | ||
1627 | /* Allocate and fill host */ | |
1628 | host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS); | |
1629 | if (!host) { | |
1630 | dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n"); | |
1631 | err = -ENOMEM; | |
1632 | goto error_out; | |
1633 | } | |
1634 | ||
1635 | host->private_data = hsdev; | |
1636 | ||
1637 | /* Setup port */ | |
1638 | host->ports[0]->ioaddr.cmd_addr = base; | |
1639 | host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET; | |
1640 | host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET; | |
1641 | sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base); | |
1642 | ||
1643 | /* Read the ID and Version Registers */ | |
1644 | idr = in_le32(&hsdev->sata_dwc_regs->idr); | |
1645 | versionr = in_le32(&hsdev->sata_dwc_regs->versionr); | |
1646 | dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n", | |
1647 | idr, ver[0], ver[1], ver[2]); | |
1648 | ||
1649 | /* Get SATA DMA interrupt number */ | |
1650 | irq = irq_of_parse_and_map(ofdev->dev.of_node, 1); | |
1651 | if (irq == NO_IRQ) { | |
1652 | dev_err(&ofdev->dev, "no SATA DMA irq\n"); | |
1653 | err = -ENODEV; | |
1654 | goto error_out; | |
1655 | } | |
1656 | ||
1657 | /* Get physical SATA DMA register base address */ | |
1658 | host_pvt.sata_dma_regs = of_iomap(ofdev->dev.of_node, 1); | |
1659 | if (!(host_pvt.sata_dma_regs)) { | |
1660 | dev_err(&ofdev->dev, "ioremap failed for AHBDMA register" | |
1661 | " address\n"); | |
1662 | err = -ENODEV; | |
1663 | goto error_out; | |
1664 | } | |
1665 | ||
1666 | /* Save dev for later use in dev_xxx() routines */ | |
1667 | host_pvt.dwc_dev = &ofdev->dev; | |
1668 | ||
1669 | /* Initialize AHB DMAC */ | |
1670 | dma_dwc_init(hsdev, irq); | |
1671 | ||
1672 | /* Enable SATA Interrupts */ | |
1673 | sata_dwc_enable_interrupts(hsdev); | |
1674 | ||
1675 | /* Get SATA interrupt number */ | |
1676 | irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); | |
1677 | if (irq == NO_IRQ) { | |
1678 | dev_err(&ofdev->dev, "no SATA irq\n"); | |
1679 | err = -ENODEV; | |
1680 | goto error_out; | |
1681 | } | |
1682 | ||
1683 | /* | |
1684 | * Now, register with libATA core, this will also initiate the | |
1685 | * device discovery process, invoking our port_start() handler & | |
1686 | * error_handler() to execute a dummy Softreset EH session | |
1687 | */ | |
1688 | rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); | |
1689 | ||
1690 | if (rc != 0) { | |
1691 | dev_err(&ofdev->dev, "failed to activate host\n"); | |
| err = rc; | |
| goto error_out; | |
| } | |
1692 | ||
1693 | dev_set_drvdata(&ofdev->dev, host); | |
1694 | return 0; | |
1695 | ||
1696 | error_out: | |
1697 | /* Free SATA DMA resources */ | |
1698 | dma_dwc_exit(hsdev); | |
1699 | ||
1700 | if (base) | |
1701 | iounmap(base); | |
| kfree(hsdev); | |
1702 | return err; | |
1703 | } | |
1704 | ||
60652d07 | 1705 | static int sata_dwc_remove(struct platform_device *ofdev) |
62936009 RS |
1706 | { |
1707 | struct device *dev = &ofdev->dev; | |
1708 | struct ata_host *host = dev_get_drvdata(dev); | |
1709 | struct sata_dwc_device *hsdev = host->private_data; | |
1710 | ||
1711 | ata_host_detach(host); | |
1712 | dev_set_drvdata(dev, NULL); | |
1713 | ||
1714 | /* Free SATA DMA resources */ | |
1715 | dma_dwc_exit(hsdev); | |
1716 | ||
1717 | iounmap(hsdev->reg_base); | |
1718 | kfree(hsdev); | |
| /* The ata_host itself is devres-managed and is freed by the core */ | |
1720 | dev_dbg(&ofdev->dev, "done\n"); | |
1721 | return 0; | |
1722 | } | |
1723 | ||
1724 | static const struct of_device_id sata_dwc_match[] = { | |
1725 | { .compatible = "amcc,sata-460ex", }, | |
1726 | {} | |
1727 | }; | |
1728 | MODULE_DEVICE_TABLE(of, sata_dwc_match); | |
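| /* | |
|  * Illustrative only -- a hypothetical device tree node showing the | |
|  * resources the probe routine above expects: reg index 0 is the SATA | |
|  * core register block, reg index 1 the AHB DMA registers, interrupt | |
|  * index 0 the SATA interrupt and index 1 the DMA interrupt. The | |
|  * addresses, sizes and interrupt numbers below are placeholders, not | |
|  * taken from any real board file: | |
|  * | |
|  *	sata@1 { | |
|  *		compatible = "amcc,sata-460ex"; | |
|  *		reg = <0x0 0x1000>, <0x1000 0x400>; | |
|  *		interrupts = <1>, <2>; | |
|  *	}; | |
|  */ | |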
1729 | ||
1730 | static struct of_platform_driver sata_dwc_driver = { | |
1731 | .driver = { | |
1732 | .name = DRV_NAME, | |
1733 | .owner = THIS_MODULE, | |
1734 | .of_match_table = sata_dwc_match, | |
1735 | }, | |
1736 | .probe = sata_dwc_probe, | |
1737 | .remove = sata_dwc_remove, | |
1738 | }; | |
1739 | ||
1740 | static int __init sata_dwc_init(void) | |
1741 | { | |
1742 | return of_register_platform_driver(&sata_dwc_driver); | |
1743 | } | |
1744 | ||
1745 | static void __exit sata_dwc_exit(void) | |
1746 | { | |
1747 | of_unregister_platform_driver(&sata_dwc_driver); | |
1748 | } | |
1749 | ||
1750 | module_init(sata_dwc_init); | |
1751 | module_exit(sata_dwc_exit); | |
1752 | ||
1753 | MODULE_LICENSE("GPL"); | |
1754 | MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>"); | |
1755 | MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver"); | |
1756 | MODULE_VERSION(DRV_VERSION); |