/* Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <uapi/linux/ppp_defs.h>
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP2_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP2_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET		16
#define MVPP2_TRANSMITTED_THRESH_MASK		0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP2_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP2_BM_MC_RLS_REG			0x64c4
#define MVPP2_BM_MC_ID_MASK			0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff
/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))
/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MIB_COUNTERS_BASE(port)		(0x1000 + ((port) >> 1) * \
						0x400 + (port) * 0x400)
#define MVPP2_MIB_LATE_COLLISION		0x7c
#define MVPP2_ISR_SUM_MASK_REG			0x220c
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff
/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
#define MVPP2_TX_CSUM_MAX_SIZE		9800
/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff
/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Maximum number of RXQs used by single port */
#define MVPP2_MAX_RXQ			8
/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		4
/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM		(MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
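/* Worked example, assuming the standard Ethernet MTU of 1500 bytes:
 * MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32) = 1536.
 * The buffer size then adds NET_SKB_PAD of headroom and the total size
 * adds room for the shared skb_shinfo area, so MVPP2_RX_MAX_PKT_SIZE()
 * simply inverts those two additions.
 */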
#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)
/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH   = 1,
	MVPP2_TAG_TYPE_DSA  = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)	\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
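/* As an example of the layout above: the shift value occupies bits 64-71,
 * i.e. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] is
 * byte 8 of the entry, with its sign in bit 72; the AI field starts at
 * bit 90 and therefore straddles bytes 11 and 12.
 */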
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			0xc
#define MVPP2_PRS_RI_VLAN_NONE			~(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		0x600
#define MVPP2_PRS_RI_L2_UCAST			~(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		0x7000
#define MVPP2_PRS_RI_L3_UN			~(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		0x18000
#define MVPP2_PRS_RI_L3_UCAST			~(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};
/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};
/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64
/* BM constants */
#define MVPP2_BM_POOLS_NUM		8
#define MVPP2_BM_LONG_BUF_NUM		1024
#define MVPP2_BM_SHORT_BUF_NUM		2048
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL		3
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24
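/* So a cookie built on CPU 1 for pool 3 would look like
 * (1 << MVPP2_BM_COOKIE_CPU_OFFS) | (3 << MVPP2_BM_COOKIE_POOL_OFFS),
 * leaving the low byte free for other per-buffer information.
 */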
/* BM short pool packet size
 * This value ensures that, for SWF, the total number of bytes
 * allocated for each buffer will be 512.
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
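/* In other words, the fixed 512-byte per-buffer budget is split between
 * NET_SKB_PAD of headroom, the packet data itself and the trailing
 * skb_shared_info, so the usable short-packet size is whatever
 * MVPP2_RX_MAX_PKT_SIZE(512) leaves once those two overheads are taken
 * out (both are build-time constants that depend on the architecture).
 */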
/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;
};
struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};
struct mvpp2_port {
	u8 id;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	u32 pending_cause_rx;
	struct napi_struct napi;

	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */
#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)
#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)
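/* A hypothetical status check built from the bits above (illustrative
 * only): a cleanly received TCP-over-IPv4 frame whose L4 checksum was
 * verified by hardware would satisfy
 *
 *	!(status & MVPP2_RXD_ERR_SUMMARY) &&
 *	 (status & MVPP2_RXD_L3_IP4) &&
 *	 (status & MVPP2_RXD_L4_TCP) &&
 *	 (status & MVPP2_RXD_L4_CSUM_OK)
 */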
struct mvpp2_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};
struct mvpp2_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_phys_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};
union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};
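/* Both unions alias the same storage, so an entry can be transferred
 * word-wise through the indirect-access registers while individual
 * fields are edited byte-wise; on a little-endian CPU (the usual case
 * for this SoC), byte[0] is the least significant byte of word[0].
 */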
struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};
struct mvpp2_prs_shadow {
	bool valid;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};
struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};
struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE physical base address */
	dma_addr_t phys_addr;

	/* Ports using BM pool */
	u32 port_map;

	/* Occupied buffers indicator */
	atomic_t in_use;
};
struct mvpp2_buff_hdr {
	u32 next_buff_phys_addr;
	u32 next_buff_virt_addr;
	u16 byte_count;
	u16 info;
	u8  reserved1;		/* bm_qset (for future use, BM) */
};
/* Buffer header info bits */
#define MVPP2_B_HDR_INFO_MC_ID_MASK	0xfff
#define MVPP2_B_HDR_INFO_MC_ID(info)	((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
#define MVPP2_B_HDR_INFO_LAST_OFFS	12
#define MVPP2_B_HDR_INFO_LAST_MASK	BIT(12)
#define MVPP2_B_HDR_INFO_IS_LAST(info) \
	(((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_MAX_TXQ;
#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}
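/* Register fields are typically updated with a read-modify-write built
 * from the two accessors above; a minimal sketch (this helper is
 * illustrative and not part of the original driver):
 */
static inline void mvpp2_modify(struct mvpp2 *priv, u32 offset,
				u32 clear, u32 set)
{
	u32 val = mvpp2_read(priv, offset);	/* read current value */

	val &= ~clear;				/* drop the cleared bits */
	val |= set;				/* merge in the new bits */
	mvpp2_write(priv, offset, val);
}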
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}
static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb)
{
	txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}
/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
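/* E.g. with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8, TXQ 2 of
 * Ethernet port 1 maps to physical TXQ (16 + 1) * 8 + 2 = 138; the first
 * 16 "ports" of the TXQ numbering space are reserved for PON T-CONTs.
 */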
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}
/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}
/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}
/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
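/* The port map is stored inverted: a set enable bit means "don't care"
 * for that port.  E.g. mvpp2_prs_tcam_port_map_set(pe, BIT(0) | BIT(1))
 * writes ~0x3 & 0xff = 0xfc into the enable byte, and the getter above
 * inverts it again to recover the 0x3 bitmap of ports 0 and 1.
 */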
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}
/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}
/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}
/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}
/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}
/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}
/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}
/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}
/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
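/* With MVPP2_PRS_SRAM_AI_OFFS = 90 the field starts two bits into byte 11
 * (90 / 8 = 11, 90 % 8 = 2), so the read above stitches the top six bits
 * of byte 11 together with the bottom two bits of byte 12 to recover the
 * 8-bit AI value.
 */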
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
							   (unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through the all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/
		mvpp2_prs_sram_shift_set(&pe, shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
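/* A sketch of how the helper above is meant to be driven (illustrative
 * only): installing all four tag/type combinations for one port, e.g.
 *
 *	mvpp2_prs_dsa_tag_set(priv, port, true,
 *			      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
 *	mvpp2_prs_dsa_tag_set(priv, port, true,
 *			      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
 *	mvpp2_prs_dsa_tag_set(priv, port, true,
 *			      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
 *	mvpp2_prs_dsa_tag_set(priv, port, true,
 *			      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
 */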
/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag*/
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Search for existing single/triple vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
						   unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through the all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;

		mvpp2_prs_hw_read(priv, pe);
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		if (tid <= tid_aux) {
			ret = -EINVAL;
			goto error;
		}

		memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		mvpp2_prs_match_etype(pe, 0, tpid);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Shift 4 bytes - skip 1 vlan tag */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);

	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);

	return ret;
}
/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}
/* Search for existing double vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
							  unsigned short tpid1,
							  unsigned short tpid2)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through the all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);

		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
			&& mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 8 bytes - skip 2 vlan tags */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);
	return ret;
}
/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
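
/* Illustrative note, not from the original source: the second ("not
 * fragmented") entry deliberately reuses pe without a memset, so it inherits
 * the protocol match at byte 5 and the ai bits from the fragmented entry;
 * only the RI words are cleared, and bytes 2-3 (IPv4 flags/fragment offset)
 * are additionally required to be zero.
 */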
/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
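
/* Illustrative usage, not from the original source: during parser default
 * initialization every port is pointed at the Marvell Header lookup as its
 * first stage (see mvpp2_prs_default_init() below):
 *
 *	for (port = 0; port < MVPP2_MAX_PORTS; port++)
 *		mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 *				       MVPP2_PRS_PORT_LU_MAX, 0);
 */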
/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}
/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Set default entries (place holder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* place holders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
}
/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
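
/* Illustrative note, not from the original source: prs_shadow mirrors the
 * TCAM so software can scan entries without indirect register reads. For the
 * L2 entries above, .udf records the entry class (MVPP2_PRS_UDF_L2_DEF) and
 * .finish marks whether parsing terminates here (ARP, LBTD, unknown
 * ethertype) or continues into a further lookup stage (PPPoE, IPv4, IPv6).
 */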
/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88A8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88A8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
					      MVPP2_PRS_DBL_VLANS_MAX,
					      GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x8100, 0x88A8 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
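
/* Illustrative note, not from the original source: the double-vlan pairs are
 * installed before the single-vlan entries; mvpp2_prs_vlan_add() allocates
 * single-tag rules from the high end of the free tid range, which preserves
 * the "double before single/triple" ordering that
 * mvpp2_prs_double_vlan_add() verifies.
 */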
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}
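
/* Illustrative usage, not from the original source: an exact-DA lookup
 * passes an all-ones mask, as mvpp2_prs_mac_da_accept() does below:
 *
 *	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 *	if (mvpp2_prs_mac_range_equals(pe, da, mask))
 *		;	// entry matches this exact DA
 */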
/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry*/
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
						       MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (is_multicast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}
static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}
/* Delete all port's multicast simple (not range) entries */
static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
{
	struct mvpp2_prs_entry pe;
	int index, tid;

	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		/* Only simple mac entries */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
			/* Delete this entry */
			mvpp2_prs_mac_da_accept(priv, port, da, false);
	}
}
static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}
/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry exists yet */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}
/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS);
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}
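
/* Illustrative note, not from the original source: the lookup ID table is
 * two-way (each lkpid exists once per way), which is why the clearing loop
 * above writes every index twice, once with le.way = 0 and once with
 * le.way = 1.
 */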
static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}
/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	int size_bytes;
	u32 val;

	size_bytes = sizeof(u32) * size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->phys_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    bm_pool->phys_addr);
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;
	atomic_set(&bm_pool->in_use, 0);
	spin_lock_init(&bm_pool->lock);

	return 0;
}
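
/* Illustrative note, not from the original source: the coherent area
 * allocated above holds no packet data; it is the pool's ring of buffer
 * pointers (size entries of sizeof(u32) each) that the buffer manager
 * hardware pushes to and pops from.
 */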
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		u32 vaddr;

		/* Get buffer virtual address (indirect access) */
		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
		vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
		if (!vaddr)
			break;
		dev_kfree_skb_any((struct sk_buff *)vaddr);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(priv, bm_pool);
	if (bm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
			  bm_pool->virt_addr,
			  bm_pool->phys_addr);
	return 0;
}
static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}
static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
	val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
		MVPP2_RXQ_POOL_LONG_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
	val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
		MVPP2_RXQ_POOL_SHORT_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Allocate skb for BM pool */
static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
				       struct mvpp2_bm_pool *bm_pool,
				       dma_addr_t *buf_phys_addr,
				       gfp_t gfp_mask)
{
	struct sk_buff *skb;
	dma_addr_t phys_addr;

	skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
	if (!skb)
		return NULL;

	phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
				   MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	*buf_phys_addr = phys_addr;

	return skb;
}
/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
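
/* Illustrative usage, not from the original source: the two helpers above
 * round-trip a pool id through a BM cookie:
 *
 *	u32 bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
 *	int pool = mvpp2_bm_cookie_pool_get(bm);	// == bm_pool->id
 */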
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     u32 buf_phys_addr, u32 buf_virt_addr)
{
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
}

/* Release multicast buffer */
static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
				 u32 buf_phys_addr, u32 buf_virt_addr,
				 int mc_id)
{
	u32 val = 0;

	val |= (mc_id & MVPP2_BM_MC_ID_MASK);
	mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);

	mvpp2_bm_pool_put(port, pool,
			  buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
			  buf_virt_addr);
}

/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      u32 phys_addr, u32 cookie)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
}
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct sk_buff *skb;
	int i, buf_size, total_size;
	u32 bm;
	dma_addr_t phys_addr;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
	for (i = 0; i < buf_num; i++) {
		skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
		if (!skb)
			break;

		mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;
	bm_pool->in_use_thresh = bm_pool->buf_num / 4;

	netdev_dbg(port->dev,
		   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "%s pool %d: %d of %d buffers added\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, i, buf_num);
	return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	unsigned long flags = 0;
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	spin_lock_irqsave(&new_pool->lock, flags);

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(port->priv, new_pool);

		new_pool->pkt_size = pkt_size;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			/* We need to undo the bufs_add() allocations */
			spin_unlock_irqrestore(&new_pool->lock, flags);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	spin_unlock_irqrestore(&new_pool->lock, flags);

	return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	unsigned long flags = 0;
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
		       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					 MVPP2_BM_SWF_LONG,
					 port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		spin_lock_irqsave(&port->pool_long->lock, flags);
		port->pool_long->port_map |= (1 << port->id);
		spin_unlock_irqrestore(&port->pool_long->lock, flags);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
					  MVPP2_BM_SWF_SHORT,
					  MVPP2_BM_SHORT_PKT_SIZE);
		if (!port->pool_short)
			return -ENOMEM;

		spin_lock_irqsave(&port->pool_short->lock, flags);
		port->pool_short->port_map |= (1 << port->id);
		spin_unlock_irqrestore(&port->pool_short->lock, flags);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_bm_pool *port_pool = port->pool_long;
	int num, pkts_num = port_pool->buf_num;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* Update BM pool with new buffer size */
	mvpp2_bm_bufs_free(port->priv, port_pool);
	if (port_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
		return -EIO;
	}

	port_pool->pkt_size = pkt_size;
	num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
	if (num != pkts_num) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     port_pool->id, num, pkts_num);
		return -EIO;
	}

	mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));

	dev->mtu = mtu;
	netdev_update_features(dev);
	return 0;
}
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}

/* Mask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
		    (MVPP2_CAUSE_MISC_SUM_MASK |
		     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
		     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
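
/* Illustrative note, not from the original source: mvpp2_interrupts_mask()
 * and mvpp2_interrupts_unmask() take a void *arg so they fit the smp call
 * function prototype and can be run on every CPU, e.g. (assumed usage):
 *
 *	on_each_cpu(mvpp2_interrupts_mask, port, 1);
 */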
/* Port configuration routines */

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
		/* fall through */
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}
static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	/* Busy-wait until the MAC reports the reset as complete */
	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}
/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	/* Configure port to loopback if needed */
	if (port->flags & MVPP2_F_LOOPBACK)
		mvpp2_port_loopback_set(port);

	/* Update TX FIFO MIN Threshold */
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	/* Min. TX threshold must be less than minimal packet length */
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}
/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}
/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}
/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		    MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
{
	int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
		   MVPP2_RXD_BM_POOL_ID_OFFS;
	int cpu = smp_processor_id();

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}
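
/* Illustrative sketch (not part of the original driver): the cookie
 * built above packs the pool id and the current CPU into byte-wide
 * fields, e.g. pool 2 seen on CPU 1 gives
 * (2 << MVPP2_BM_COOKIE_POOL_OFFS) | (1 << MVPP2_BM_COOKIE_CPU_OFFS).
 * The driver's own accessor for the inverse is
 * mvpp2_bm_cookie_pool_get() (used in mvpp2_rx() below); the helper
 * here only demonstrates the assumed layout.
 */
static inline int mvpp2_bm_cookie_pool_example(u32 cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}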
/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}
/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}
/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
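
/* Design note: each CPU owns one aggregated TXQ, so descriptors can be
 * staged without locking; the destination physical TXQ travels inside
 * each descriptor (phys_txq), and the single register write above
 * tells the hardware how many new aggregated descriptors to fetch.
 */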
/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > aggr_txq->size) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
	}

	if ((aggr_txq->count + num) > aggr_txq->size)
		return -ENOMEM;

	return 0;
}
/* Reserved Tx descriptors allocation request */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	    (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor count has been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;

	return 0;
}
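
/* Worked example (illustrative): if this CPU has 3 descriptors
 * reserved and the caller needs num = 5, then
 * req = max(MVPP2_CPU_DESC_CHUNK, 5 - 3) - a full chunk is requested
 * rather than just the 2 missing descriptors, amortizing the
 * indirect-register round trip over future packets.
 */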
/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}
/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == swab16(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}
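
/* Typical use (see mvpp2_skb_tx_csum() further below): the offsets
 * come straight from the skb, roughly
 *
 *	cmd = mvpp2_txq_desc_csum(skb_network_offset(skb),
 *				  skb->protocol, ip_hdr(skb)->ihl,
 *				  ip_hdr(skb)->protocol);
 *
 * for an IPv4 packet, letting the hardware fill in both the IPv4
 * header checksum and the L4 checksum.
 */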
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}
/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32	val, size, mtu;
	int	txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 pkts)
{
	u32 val;

	val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);

	rxq->pkts_coal = pkts;
}
/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 usec)
{
	u32 val;

	val = (port->priv->tclk / USEC_PER_SEC) * usec;
	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);

	rxq->time_coal = usec;
}
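
/* Illustrative arithmetic: the threshold register counts clock cycles,
 * so with a hypothetical tclk of 250 MHz and usec = 32 the value
 * written is (250000000 / USEC_PER_SEC) * 32 = 8000 cycles, i.e. at
 * most one Rx interrupt per 32 microseconds from this queue while
 * packets keep arriving.
 */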
/* Set threshold for TX_DONE pkts coalescing */
static void mvpp2_tx_done_pkts_coal_set(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;
	u32 val;

	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
		       MVPP2_TRANSMITTED_THRESH_MASK;
		mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
		mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
	}
}
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_tx_desc *tx_desc = txq->descs +
						txq_pcpu->txq_get_index;
		struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];

		mvpp2_txq_inc_get(txq_pcpu);

		if (!skb)
			continue;

		dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
				 tx_desc->data_size, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause >> 16) - 1;

	return port->txqs[queue];
}
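
/* Both lookups rely on the layout of the Rx/Tx cause register (Rx
 * queues in bits 0-15, Tx queues in bits 16-23, as described in
 * mvpp2_txq_done_percpu() below): fls() - 1 converts the
 * highest-numbered pending bit into a queue index.
 */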
/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
}
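
/* The wake threshold mirrors the stop condition in mvpp2_tx() below:
 * the queue is restarted only once at least MAX_SKB_FRAGS + 1
 * descriptors are free again, enough for one maximally fragmented skb.
 */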
/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
				desc_num * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_phys, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(aggr_txq->descs !=
	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
		    aggr_txq->descs_phys);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}
/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(rx_desc);

		mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
				  rx_desc->buf_cookie);
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs, rxq->descs_phys);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys        = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
						     MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
					   sizeof(*txq_pcpu->tx_skb),
					   GFP_KERNEL);
		if (!txq_pcpu->tx_skb) {
			dma_free_coherent(port->dev->dev.parent,
					  txq->size * MVPP2_DESC_ALIGNED_SIZE,
					  txq->descs, txq->descs_phys);
			return -ENOMEM;
		}

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}

	return 0;
}
/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->tx_skb);
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys        = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}
/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}
/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}
/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}
/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}
/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;

	mvpp2_interrupts_disable(port);

	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}
/* PHY link state change callback */
static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = port->phy_dev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed  = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
		phy_print_status(phydev);
	}
}
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}
/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, int is_recycle)
{
	struct sk_buff *skb;
	dma_addr_t phys_addr;

	if (is_recycle &&
	    (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
		return 0;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
	atomic_dec(&bm_pool->in_use);
	return 0;
}
/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
/* Walk a chain of multicast buffers linked via buffer headers and
 * release each one back to its BM pool.
 */
static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
			      struct mvpp2_rx_desc *rx_desc)
{
	struct mvpp2_buff_hdr *buff_hdr;
	struct sk_buff *skb;
	u32 rx_status = rx_desc->status;
	u32 buff_phys_addr;
	u32 buff_virt_addr;
	u32 buff_phys_addr_next;
	u32 buff_virt_addr_next;
	int mc_id;
	int pool_id;

	pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
		   MVPP2_RXD_BM_POOL_ID_OFFS;
	buff_phys_addr = rx_desc->buf_phys_addr;
	buff_virt_addr = rx_desc->buf_cookie;

	do {
		skb = (struct sk_buff *)buff_virt_addr;
		buff_hdr = (struct mvpp2_buff_hdr *)skb->head;

		mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);

		buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
		buff_virt_addr_next = buff_hdr->next_buff_virt_addr;

		/* Release buffer */
		mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
				     buff_virt_addr, mc_id);

		buff_phys_addr = buff_phys_addr_next;
		buff_virt_addr = buff_virt_addr_next;

	} while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
}
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
		    struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received, rx_filled, i;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	rx_filled = 0;
	for (i = 0; i < rx_todo; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		u32 bm, rx_status;
		int pool, rx_bytes, err;

		rx_filled++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;

		bm = mvpp2_bm_cookie_build(rx_desc);
		pool = mvpp2_bm_cookie_pool_get(bm);
		bm_pool = &port->priv->bm_pools[pool];
		/* Check if buffer header is used */
		if (rx_status & MVPP2_RXD_BUF_HDR) {
			mvpp2_buff_hdr_rx(port, rx_desc);
			continue;
		}

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * carried by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
					  rx_desc->buf_cookie);
			continue;
		}

		skb = (struct sk_buff *)rx_desc->buf_cookie;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;
		atomic_inc(&bm_pool->in_use);

		skb_reserve(skb, MVPP2_MH_SIZE);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(&port->napi, skb);

		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			rx_filled--;
		}
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);

	return rx_todo;
}
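
/* Receive path note: buf_cookie holds the skb pointer that was handed
 * to the Buffer Manager at refill time, so the driver recovers the skb
 * for a completed descriptor with a simple cast rather than a lookup
 * table - see the (struct sk_buff *)rx_desc->buf_cookie cast above.
 */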
static inline void
tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	dma_unmap_single(dev, desc->buf_phys_addr,
			 desc->data_size, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}
/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_phys_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		tx_desc->phys_txq = txq->id;
		tx_desc->data_size = frag->size;

		buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
					       tx_desc->data_size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
			mvpp2_txq_desc_put(txq);
			goto error;
		}

		tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
		tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			tx_desc->command = MVPP2_TXD_L_DESC;
			mvpp2_txq_inc_put(txq_pcpu, skb);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			mvpp2_txq_inc_put(txq_pcpu, NULL);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
	}

	return -ENOMEM;
}
/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_phys_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	tx_desc->phys_txq = txq->id;
	tx_desc->data_size = skb_headlen(skb);

	buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
				       tx_desc->data_size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}
	tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
	tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		tx_desc->command = tx_cmd;
		mvpp2_txq_inc_put(txq_pcpu, skb);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		tx_desc->command = tx_cmd;
		mvpp2_txq_inc_put(txq_pcpu, NULL);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
			frags = 0;
			goto out;
		}
	}

	txq_pcpu->reserved_num -= frags;
	txq_pcpu->count += frags;
	aggr_txq->count += frags;

	/* Enable transmit */
	wmb();
	mvpp2_aggr_txq_pend_desc_add(port, frags);

	if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netif_tx_stop_queue(nq);
	}
out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}
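
/* Transmit path summary: descriptors are first staged in this CPU's
 * lock-free aggregated TXQ, while a per-CPU reservation
 * (mvpp2_txq_reserved_desc_num_proc()) guarantees the shared physical
 * TXQ can absorb them; only then is the pending count published to the
 * hardware via mvpp2_aggr_txq_pend_desc_add().
 */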
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}
static void mvpp2_txq_done_percpu(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 cause_rx_tx, cause_tx, cause_misc;

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_read(port->priv,
				 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
	cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;

	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
			    cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	/* Release TX descriptors */
	if (cause_tx) {
		struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
		struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);

		if (txq_pcpu->count)
			mvpp2_txq_done(port, txq, txq_pcpu);
	}
}
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);

	on_each_cpu(mvpp2_txq_done_percpu, port, 1);

	cause_rx_tx = mvpp2_read(port->priv,
				 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	/* Process RX packets */
	cause_rx |= port->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete(napi);

		mvpp2_interrupts_enable(port);
	}
	port->pending_cause_rx = cause_rx;
	return rx_done;
}
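
/* NAPI bookkeeping: any Rx queues left unserviced when the budget ran
 * out are remembered in port->pending_cause_rx, so the next poll
 * resumes where this one stopped; interrupts are re-enabled only on
 * napi_complete(), matching the disable in mvpp2_isr().
 */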
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	mvpp2_gmac_max_rx_size_set(port);
	mvpp2_txp_max_tx_size_set(port);

	napi_enable(&port->napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	mvpp2_port_enable(port);
	phy_start(port->phy_dev);
	netif_tx_start_all_queues(port->dev);
}
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mdelay(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	napi_disable(&port->napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	phy_stop(port->phy_dev);
}
/* Return positive if MTU is valid */
static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD)
		new_rx_pending = MVPP2_MAX_RXD;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD)
		new_tx_pending = MVPP2_MAX_TXD;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
static int mvpp2_phy_connect(struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
				 port->phy_interface);
	if (!phy_dev) {
		netdev_err(port->dev, "cannot connect to phy\n");
		return -ENODEV;
	}
	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	port->phy_dev = phy_dev;
	port->link    = 0;
	port->duplex  = 0;
	port->speed   = 0;

	return 0;
}

static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
	phy_disconnect(port->phy_dev);
	port->phy_dev = NULL;
}
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
	if (err) {
		netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
		goto err_cleanup_txqs;
	}

	/* By default the link is down */
	netif_carrier_off(port->dev);

	err = mvpp2_phy_connect(port);
	if (err < 0)
		goto err_free_irq;

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);

	mvpp2_start_dev(port);

	return 0;

err_free_irq:
	free_irq(port->irq, port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);

	free_irq(port->irq, port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	return 0;
}
static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	struct netdev_hw_addr *ha;
	int id = port->id;
	bool allmulti = dev->flags & IFF_ALLMULTI;

	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

	/* Remove all port->id's mcast entries */
	mvpp2_prs_mcast_del_all(priv, id);

	if (allmulti && !netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(ha, dev)
			mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
	}
}
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser to accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;

error:
	netdev_err(dev, "fail to change MAC address\n");
	return err;
}
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	mtu = mvpp2_check_mtu_valid(dev, mtu);
	if (mtu < 0) {
		err = mtu;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

error:
	netdev_err(dev, "fail to change MTU\n");
	return err;
}
static struct rtnl_link_stats64 *
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors  = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	if (!port->phy_dev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}
/* Ethtool methods */

/* Get settings (phy address, speed) for ethtools */
static int mvpp2_ethtool_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phy_dev)
		return -ENODEV;
	return phy_ethtool_gset(port->phy_dev, cmd);
}

/* Set settings (phy address, speed) for ethtools */
static int mvpp2_ethtool_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phy_dev)
		return -ENODEV;
	return phy_ethtool_sset(port->phy_dev, cmd);
}
/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
		mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
	}

	on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);

	return 0;
}
/* Get coalescing for ethtools */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	return 0;
}
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}
static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD;
	ring->tx_max_pending = MVPP2_MAX_TXD;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}
static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "fail to change ring parameters");
	return err;
}
static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.get_link	= ethtool_op_get_link,
	.get_settings	= mvpp2_ethtool_get_settings,
	.set_settings	= mvpp2_ethtool_set_settings,
	.set_coalesce	= mvpp2_ethtool_set_coalesce,
	.get_coalesce	= mvpp2_ethtool_get_coalesce,
	.get_drvinfo	= mvpp2_ethtool_get_drvinfo,
	.get_ringparam	= mvpp2_ethtool_get_ringparam,
	.set_ringparam	= mvpp2_ethtool_set_ringparam,
};
/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq)
			return -ENOMEM;

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			/* Set err explicitly so the error path never returns
			 * an uninitialized value.
			 */
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Configure Rx queue group interrupt for this port */
	mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < txq_number; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
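/* For illustration only (a sketch, not part of the original sources): the
 * predefined Tx queue mapping used in mvpp2_port_init() gives each port a
 * contiguous block of physical Tx queues, so mvpp2_txq_phys() is assumed to
 * resolve along the lines of
 *
 *	phys_txq = (MVPP2_MAX_TCONT + port_id) * MVPP2_MAX_TXQ + log_txq;
 *
 * meaning logical queue 0 of two different ports never collides on the same
 * physical queue.
 */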
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct device_node *port_node,
			    struct mvpp2 *priv,
			    int *next_first_rxq)
{
	struct device_node *phy_node;
	struct mvpp2_port *port;
	struct net_device *dev;
	struct resource *res;
	const char *dt_mac_addr;
	const char *mac_from;
	char hw_mac_addr[ETH_ALEN];
	u32 id;
	int features;
	int phy_mode;
	int priv_common_regs_num = 2;
	int err, i;

	dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
				 rxq_number);
	if (!dev)
		return -ENOMEM;

	phy_node = of_parse_phandle(port_node, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "missing phy\n");
		err = -ENODEV;
		goto err_free_netdev;
	}

	phy_mode = of_get_phy_mode(port_node);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (of_property_read_u32(port_node, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);

	port->irq = irq_of_parse_and_map(port_node, 0);
	if (port->irq <= 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	if (of_property_read_bool(port_node, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->priv = priv;
	port->id = id;
	port->first_rxq = *next_first_rxq;
	port->phy_node = phy_node;
	port->phy_interface = phy_mode;

	res = platform_get_resource(pdev, IORESOURCE_MEM,
				    priv_common_regs_num + id);
	port->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(port->base)) {
		err = PTR_ERR(port->base);
		goto err_free_irq;
	}

	/* Alloc per-cpu stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	dt_mac_addr = of_get_mac_address(port_node);
	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
		mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
	} else {
		mvpp2_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;
	port->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}
	mvpp2_port_power_up(port);

	netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
	features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->vlan_features |= features;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_txq_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	/* Increment the first Rx queue number to be used by the next port */
	*next_first_rxq += rxq_number;
	priv->port_list[id] = port;
	return 0;

err_free_txq_pcpu:
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	irq_dispose_mapping(port->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}
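/* A worked example of the resource layout mvpp2_port_probe() assumes: with
 * priv_common_regs_num == 2, MEM resources 0 and 1 carry the common packet
 * processor and LMS register windows (mapped in mvpp2_probe() below), so the
 * per-port registers of port N are looked up at MEM resource 2 + N.
 */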
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	free_percpu(port->stats);
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
	irq_dispose_mapping(port->irq);
	free_netdev(port->dev);
}
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
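/* Worked example (illustrative values, not taken from a real board): a
 * 256 MB chip select at base 0x00000000 with mbus_attr 0xe and target id 0
 * programs
 *
 *	MVPP2_WIN_BASE(i) = (0x00000000 & 0xffff0000) | (0xe << 8) | 0
 *			  = 0x00000e00
 *	MVPP2_WIN_SIZE(i) = (0x10000000 - 1) & 0xffff0000 = 0x0fff0000
 *
 * i.e. base in bits [31:16], attributes in bits [15:8] and the target id in
 * bits [7:0], matching the encoding used above.
 */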
/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
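/* The same data/attribute FIFO sizes are programmed for every port, and the
 * write to MVPP2_RX_FIFO_INIT_REG presumably triggers the controller to
 * latch the sizes configured above, which would be why it comes last (an
 * assumption based on the register name, not on documentation).
 */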
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints */
	if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	/* Disable HW PHY polling */
	val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
	writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Reset Rx queue group interrupt configuration */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);

	writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
	       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
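/* Note on the rxq_number % 4 constraint checked above: each port's Rx queue
 * group register (MVPP2_ISR_RXQ_GROUP_REG) is programmed with rxq_number
 * directly, and ports appear to be assigned physical Rx queues in aligned
 * groups, so a value that is not a multiple of 4 would leave the groups
 * misaligned (an interpretation of this driver version's usage, not a
 * datasheet statement).
 */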
static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	int port_count, first_rxq;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->lms_base))
		return PTR_ERR(priv->lms_base);

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_gop_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_gop_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(struct mvpp2_port *),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_gop_clk;
	}

	/* Initialize ports */
	first_rxq = 0;
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
		if (err < 0)
			goto err_gop_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_phys);
	}

	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
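/* The dma_free_coherent() size above has to mirror the allocation made in
 * mvpp2_aggr_txq_init() (MVPP2_AGGR_TXQ_SIZE descriptors of
 * MVPP2_DESC_ALIGNED_SIZE bytes each); the two call sites must stay in sync.
 */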
static const struct of_device_id mvpp2_match[] = {
	{ .compatible = "marvell,armada-375-pp2" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};
module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");
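/* A minimal device-tree sketch of what mvpp2_probe() and mvpp2_port_probe()
 * expect (illustrative assumptions only; node names, addresses and values
 * are not copied from any shipped .dtsi):
 *
 *	ethernet@f0000 {
 *		compatible = "marvell,armada-375-pp2";
 *		reg = <...>, <...>, <...>, <...>;  (common, LMS, one per port)
 *		clock-names = "pp_clk", "gop_clk";
 *
 *		eth0 {
 *			interrupts = <...>;
 *			port-id = <0>;
 *			phy = <&phy0>;
 *			phy-mode = "rgmii-id";
 *		};
 *	};
 */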