net: sxgbe: add basic framework for Samsung 10Gb ethernet driver
drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

#include "sxgbe_common.h"
#include "sxgbe_dma.h"
#include "sxgbe_reg.h"
#include "sxgbe_desc.h"

/* DMA core initialization */
static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
{
	int retry_count = 10;
	u32 reg_val;

	/* reset the DMA */
	writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
	while (retry_count--) {
		if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
		      SXGBE_DMA_SOFT_RESET))
			break;
		mdelay(10);
	}

	if (retry_count < 0)
		return -EBUSY;

	reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	/* If fix_burst = 0, set UNDEF = 1 in the DMA_Sys_Mode register.
	 * If fix_burst = 1, set UNDEF = 0 in the DMA_Sys_Mode register.
	 * burst_map is a bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
	 * Set burst_map irrespective of the fix_burst value.
	 */
	if (!fix_burst)
		reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;

	/* write burst len map */
	reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);

	writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	return 0;
}

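/* Per-channel DMA initialization: program the burst length (PBL), the
 * TX/RX descriptor ring base addresses and ring sizes, the tail
 * pointers, and enable the channel's TX/RX interrupts.
 */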
static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
				   int fix_burst, int pbl, dma_addr_t dma_tx,
				   dma_addr_t dma_rx, int t_rsize, int r_rsize)
{
	u32 reg_val;
	dma_addr_t dma_addr;

	reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
	/* set the pbl */
	if (fix_burst) {
		reg_val |= SXGBE_DMA_PBL_X8MODE;
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
		/* program the TX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		/* program the RX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
	}

	/* program desc registers */
	writel(upper_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));

	writel(upper_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));

	/* program tail pointers */
	/* assumption: upper 32 bits are constant and
	 * same as TX/RX desc list
	 */
	dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));

	dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));

	/* program the ring sizes */
	writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
	writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));

	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
}

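/* Set the TX start bit in the channel's TX control register to kick the
 * TX DMA on the given channel.
 */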
static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
{
	u32 tx_config;

	tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
	tx_config |= SXGBE_TX_START_DMA;
	writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
}

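/* Enable/disable all TX/RX DMA interrupts for a single channel */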
static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}

static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Disable TX/RX interrupts */
	writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}

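/* Set the TX enable bit on all 'tchannels' TX DMA channels */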
static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg |= SXGBE_TX_ENABLE;
		writel(tx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}

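/* Per-queue variants: enable or disable TX DMA on a single channel */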
static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg |= SXGBE_TX_ENABLE;
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}

static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}

static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
		writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}

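/* Enable/disable RX DMA on all 'rchannels' RX DMA channels */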
static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg |= SXGBE_RX_ENABLE;
		writel(rx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}

static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
		writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}

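/* Decode the TX-related bits of the per-channel DMA status register,
 * update the extra stats accordingly, acknowledge (clear) the serviced
 * bits and return a handle_tx/tx_bump_tc/tx_hard_error action mask.
 */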
static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* TX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		if (int_status & SXGBE_DMA_INT_STATUS_TI) {
			ret_val |= handle_tx;
			x->tx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_TI;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
			x->tx_underflow_irq++;
			ret_val |= tx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_TBU;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* TX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
			ret_val |= tx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_TPS;
			x->tx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= tx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: the FBE bit is the combination of
			 * all the bus access errors and is cleared when
			 * the respective error bits are cleared
			 */

			/* check for actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
				x->tx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
			} else {
				x->tx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
				x->tx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
			} else {
				x->tx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
				x->tx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
			}
		}

		/* context descriptor error */
		if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
			x->tx_ctxt_desc_err++;
			clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}

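/* Decode the RX-related bits of the per-channel DMA status register,
 * update the extra stats accordingly, acknowledge (clear) the serviced
 * bits and return a handle_rx/rx_bump_tc/rx_hard_error action mask.
 */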
static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* RX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		if (int_status & SXGBE_DMA_INT_STATUS_RI) {
			ret_val |= handle_rx;
			x->rx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_RI;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* RX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
			ret_val |= rx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_RBU;
			x->rx_underflow_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
			ret_val |= rx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_RPS;
			x->rx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= rx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: the FBE bit is the combination of
			 * all the bus access errors and is cleared when
			 * the respective error bits are cleared
			 */

			/* check for actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
				x->rx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB0;
			} else {
				x->rx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
				x->rx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB1;
			} else {
				x->rx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
				x->rx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB2;
			}
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}

/* Program the HW RX Watchdog */
static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
{
	u32 que_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
		writel(riwt,
		       ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
	}
}

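/* DMA helpers exposed to the rest of the driver through sxgbe_get_dma_ops() */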
static const struct sxgbe_dma_ops sxgbe_dma_ops = {
	.init = sxgbe_dma_init,
	.cha_init = sxgbe_dma_channel_init,
	.enable_dma_transmission = sxgbe_enable_dma_transmission,
	.enable_dma_irq = sxgbe_enable_dma_irq,
	.disable_dma_irq = sxgbe_disable_dma_irq,
	.start_tx = sxgbe_dma_start_tx,
	.start_tx_queue = sxgbe_dma_start_tx_queue,
	.stop_tx = sxgbe_dma_stop_tx,
	.stop_tx_queue = sxgbe_dma_stop_tx_queue,
	.start_rx = sxgbe_dma_start_rx,
	.stop_rx = sxgbe_dma_stop_rx,
	.tx_dma_int_status = sxgbe_tx_dma_int_status,
	.rx_dma_int_status = sxgbe_rx_dma_int_status,
	.rx_watchdog = sxgbe_dma_rx_watchdog,
};

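/* Return the DMA operation table so callers elsewhere in the driver can
 * drive the DMA engine through it.
 */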
const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
{
	return &sxgbe_dma_ops;
}
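
/* Usage sketch (illustrative only, not taken verbatim from the driver):
 * the setup code is expected to grab the ops table once and then drive
 * each DMA channel through it, roughly:
 *
 *	const struct sxgbe_dma_ops *dma_ops = sxgbe_get_dma_ops();
 *
 *	if (dma_ops->init(ioaddr, fix_burst, burst_map))
 *		goto err;	// DMA soft reset timed out (-EBUSY)
 *	dma_ops->cha_init(ioaddr, chan, fix_burst, pbl,
 *			  tx_phys, rx_phys, tx_ring_size, rx_ring_size);
 *	dma_ops->start_tx_queue(ioaddr, chan);
 *
 * Variable names (ioaddr, chan, pbl, tx_phys, ...) are placeholders; the
 * real call sites live elsewhere in the sxgbe driver and may differ.
 */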