/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

#include "sxgbe_common.h"
#include "sxgbe_dma.h"
#include "sxgbe_reg.h"
#include "sxgbe_desc.h"
/* DMA core initialization */
static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
{
	int retry_count = 10;
	u32 reg_val;

	/* reset the DMA and wait for the hardware to clear the bit */
	writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
	while (retry_count--) {
		if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
		      SXGBE_DMA_SOFT_RESET))
			break;
		mdelay(10);
	}

	if (retry_count < 0)
		return -EBUSY;

	reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	/* If fix_burst = 0, set UNDEF = 1 in the DMA_Sys_Mode register.
	 * If fix_burst = 1, set UNDEF = 0 in the DMA_Sys_Mode register.
	 * burst_map is a bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
	 * Set burst_map irrespective of the fix_burst value.
	 */
	if (!fix_burst)
		reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;

	/* write burst len map */
	reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);

	writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	return 0;
}
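/* Worked example (illustrative values, not a recommendation): calling
 * sxgbe_dma_init(ioaddr, 1, 0x7) skips the UNDEF bit and, per the bitmap
 * comment above, requests burst lengths BLEN4 | BLEN8 | BLEN16 by writing
 * 0x7 << SXGBE_DMA_BLENMAP_LSHIFT into DMA_Sys_Mode. Which bit maps to
 * which BLEN value is assumed from the comment's ordering.
 */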
static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
				   int fix_burst, int pbl, dma_addr_t dma_tx,
				   dma_addr_t dma_rx, int t_rsize, int r_rsize)
{
	u32 reg_val;
	dma_addr_t dma_addr;

	reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
	/* set the pbl */
	if (fix_burst) {
		reg_val |= SXGBE_DMA_PBL_X8MODE;
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
		/* program the TX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		/* program the RX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
	}

	/* program desc registers */
	writel(upper_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));

	writel(upper_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));

	/* program tail pointers */
	/* assumption: upper 32 bits are constant and
	 * same as the TX/RX desc list
	 */
	dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));
	dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	/* assumes SXGBE_DMA_CHA_RXDESC_TAILPTR_REG is defined in sxgbe_reg.h
	 * as the RX counterpart of the TX tail pointer register; the RX
	 * descriptor low-address register was already programmed above
	 */
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num));
	/* program the ring sizes */
	writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
	writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));

	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
}
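/* Worked example (illustrative): with t_rsize = 512 descriptors, the TX
 * tail pointer above is programmed to dma_tx + 511 * SXGBE_DESC_SIZE_BYTES,
 * i.e. the bus address of the last descriptor, and the ring-length
 * register gets 511, since the hardware expects "size - 1".
 */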
static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
{
	u32 tx_config;

	tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
	tx_config |= SXGBE_TX_START_DMA;
	writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
}
static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}
static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Disable TX/RX interrupts */
	writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}
static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg |= SXGBE_TX_ENABLE;
		writel(tx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}
static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg |= SXGBE_TX_ENABLE;
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}
static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}
static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
		writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}
static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg |= SXGBE_RX_ENABLE;
		writel(rx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}
static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
		writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}
static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	int ret_val = 0;

	/* TX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		if (int_status & SXGBE_DMA_INT_STATUS_TI) {
			ret_val |= handle_tx;
			x->tx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_TI;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
			x->tx_underflow_irq++;
			ret_val |= tx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_TBU;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* TX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
			ret_val |= tx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_TPS;
			x->tx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= tx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: the FBE bit is the combination of
			 * all the bus access errors and is cleared when
			 * the respective error bits are cleared
			 */

			/* check for the actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
				x->tx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
			} else {
				x->tx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
				x->tx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
			} else {
				x->tx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
				x->tx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
			}
		}

		/* context descriptor error */
		if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
			x->tx_ctxt_desc_err++;
			clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
		}
	}

	/* clear the serviced bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}
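/* Usage sketch (assumed caller behaviour, for illustration only): a DMA
 * interrupt handler would combine the returned bits along the lines of
 *
 *	status = priv->hw->dma->tx_dma_int_status(ioaddr, chan, &priv->xstats);
 *	if (status & handle_tx)
 *		... schedule NAPI to reap completed TX descriptors ...
 *	if (status & tx_hard_error)
 *		... restart the channel ...
 *
 * handle_tx, tx_bump_tc and tx_hard_error are the flag values used above;
 * priv, chan and the surrounding wiring are hypothetical names.
 */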
static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	int ret_val = 0;

	/* RX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		if (int_status & SXGBE_DMA_INT_STATUS_RI) {
			ret_val |= handle_rx;
			x->rx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_RI;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* RX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
			ret_val |= rx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_RBU;
			x->rx_underflow_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
			ret_val |= rx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_RPS;
			x->rx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= rx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: the FBE bit is the combination of
			 * all the bus access errors and is cleared when
			 * the respective error bits are cleared
			 */

			/* check for the actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
				x->rx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB0;
			} else {
				x->rx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
				x->rx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB1;
			} else {
				x->rx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
				x->rx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB2;
			}
		}
	}

	/* clear the serviced bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}
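/* Note (an assumption consistent with the write-back above): the channel
 * status register is treated as write-1-to-clear, so writing clear_val
 * acknowledges only the bits this handler actually serviced and leaves
 * any newly raised interrupt bits pending for the next pass.
 */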
/* Program the HW RX Watchdog */
static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
{
	u32 que_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
		writel(riwt,
		       ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
	}
}
static const struct sxgbe_dma_ops sxgbe_dma_ops = {
	.init				= sxgbe_dma_init,
	.cha_init			= sxgbe_dma_channel_init,
	.enable_dma_transmission	= sxgbe_enable_dma_transmission,
	.enable_dma_irq			= sxgbe_enable_dma_irq,
	.disable_dma_irq		= sxgbe_disable_dma_irq,
	.start_tx			= sxgbe_dma_start_tx,
	.start_tx_queue			= sxgbe_dma_start_tx_queue,
	.stop_tx			= sxgbe_dma_stop_tx,
	.stop_tx_queue			= sxgbe_dma_stop_tx_queue,
	.start_rx			= sxgbe_dma_start_rx,
	.stop_rx			= sxgbe_dma_stop_rx,
	.tx_dma_int_status		= sxgbe_tx_dma_int_status,
	.rx_dma_int_status		= sxgbe_rx_dma_int_status,
	.rx_watchdog			= sxgbe_dma_rx_watchdog,
};
const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
{
	return &sxgbe_dma_ops;
}
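/* Usage sketch (assumed wiring, mirroring how ops tables are typically
 * consumed in this driver family):
 *
 *	priv->hw->dma = sxgbe_get_dma_ops();
 *	priv->hw->dma->init(priv->ioaddr, pdata->fix_burst, pdata->burst_map);
 *
 * priv, hw and pdata are hypothetical names; the real hookup lives in the
 * driver's hardware-init path, not in this file.
 */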