/* Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define     MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define     MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define     MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define     MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define     MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define     MVPP2_RXQ_POOL_SHORT_OFFS		20
#define     MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define     MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define     MVPP2_RXQ_POOL_LONG_OFFS		24
#define     MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define     MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define     MVPP2_RXQ_PACKET_OFFSET_OFFS	28
#define     MVPP2_RXQ_PACKET_OFFSET_MASK	0x70000000
#define     MVPP2_RXQ_DISABLE_MASK		BIT(31)
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define     MVPP2_PRS_PORT_LU_MAX		0xf
#define     MVPP2_PRS_PORT_LU_MASK(port)	(0xff << ((port) * 4))
#define     MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define     MVPP2_PRS_INIT_OFF_MASK(port)	(0x3f << (((port) % 4) * 8))
#define     MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define     MVPP2_PRS_MAX_LOOP_MASK(port)	(0xff << (((port) % 4) * 8))
#define     MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define     MVPP2_PRS_TCAM_INV_MASK		BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define     MVPP2_PRS_TCAM_EN_MASK		BIT(0)
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define     MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define     MVPP2_CLS_PORT_WAY_MASK(port)	(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define     MVPP2_CLS_LKP_INDEX_WAY_OFFS	6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define     MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS	3
#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK	0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define     MVPP22_DESC_ADDR_OFFS		8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define     MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define     MVPP2_RXQ_NUM_PROCESSED_OFFSET	0
#define     MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define     MVPP2_RXQ_OCCUPIED_MASK		0x3fff
#define     MVPP2_RXQ_NON_OCCUPIED_OFFSET	16
#define     MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define     MVPP2_OCCUPIED_THRESH_OFFSET	0
#define     MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define     MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define     MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define     MVPP2_PREF_BUF_SIZE_4		(BIT(12) | BIT(13))
#define     MVPP2_PREF_BUF_SIZE_16		(BIT(12) | BIT(14))
#define     MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define     MVPP2_TXQ_DRAIN_EN_MASK		BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define     MVPP2_TXQ_PENDING_MASK		0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define     MVPP2_TRANSMITTED_COUNT_OFFSET	16
#define     MVPP2_TRANSMITTED_COUNT_MASK	0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define     MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define     MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define     MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define     MVPP22_AGGR_TXQ_DESC_ADDR_OFFS	8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK	0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define     MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060
/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define     MVPP2_MAX_ISR_RX_THRESHOLD		0xfffff0
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7
#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define     MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define     MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define     MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define     MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define     MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define     MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define     MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define     MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define     MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define     MVPP2_BM_POOL_BASE_ADDR_MASK	0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define     MVPP2_BM_POOL_SIZE_MASK		0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define     MVPP2_BM_POOL_GET_READ_PTR_MASK	0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define     MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define     MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define     MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define     MVPP2_BM_START_MASK			BIT(0)
#define     MVPP2_BM_STOP_MASK			BIT(1)
#define     MVPP2_BM_STATE_MASK			BIT(4)
#define     MVPP2_BM_LOW_THRESH_OFFS		8
#define     MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define     MVPP2_BM_LOW_THRESH_VALUE(val)	((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define     MVPP2_BM_HIGH_THRESH_OFFS		16
#define     MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define     MVPP2_BM_HIGH_THRESH_VALUE(val)	((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define     MVPP2_BM_RELEASED_DELAY_MASK	BIT(0)
#define     MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define     MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define     MVPP2_BM_BPPE_FULL_MASK		BIT(3)
#define     MVPP2_BM_AVAILABLE_BP_LOW_MASK	BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK	BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP22_BM_ADDR_HIGH_ALLOC		0x6444
#define     MVPP22_BM_ADDR_HIGH_PHYS_MASK	0xff
#define     MVPP22_BM_ADDR_HIGH_VIRT_MASK	0xff00
#define     MVPP22_BM_ADDR_HIGH_VIRT_SHIFT	8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK	BIT(0)
#define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK	BIT(1)
#define     MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define     MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define     MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define     MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define     MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define     MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define     MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define     MVPP2_TXP_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define     MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define     MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define     MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define     MVPP2_TXQ_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define     MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define     MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff
/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define     MVPP2_TX_PORT_FLUSH_MASK(port)	(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define     MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define     MVPP2_EXT_GLOBAL_CTRL_DEFAULT	0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define     MVPP2_GMAC_PORT_EN_MASK		BIT(0)
#define     MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define     MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define     MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define     MVPP2_GMAC_PERIODIC_XON_EN_MASK	BIT(1)
#define     MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define     MVPP2_GMAC_PCS_LB_EN_BIT		6
#define     MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define     MVPP2_GMAC_SA_LOW_OFFS		7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define     MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define     MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define     MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define     MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define     MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define     MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define     MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define     MVPP2_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define     MVPP2_GMAC_AN_SPEED_EN		BIT(7)
#define     MVPP2_GMAC_FC_ADV_EN		BIT(9)
#define     MVPP2_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define     MVPP2_GMAC_AN_DUPLEX_EN		BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define     MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS	6
#define     MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define     MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_CTRL_4_REG			0x90
#define     MVPP22_CTRL4_EXT_PIN_GMII_SEL	BIT(0)
#define     MVPP22_CTRL4_DP_CLK_SEL		BIT(5)
#define     MVPP22_CTRL4_SYNC_BYPASS		BIT(6)
#define     MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE	BIT(7)
/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
#define MVPP22_XLG_CTRL3_REG			0x11c
#define     MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define     MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG			0x1204
#define     MVPP22_SMI_POLLING_EN		BIT(10)

#define MVPP22_GMAC_BASE(port)		(0x7000 + (port) * 0x1000 + 0xe00)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff
/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
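/* For example, in a ring of 128 descriptors (last_desc == 127),
 * MVPP2_QUEUE_NEXT_DESC(q, 126) yields 127, while
 * MVPP2_QUEUE_NEXT_DESC(q, 127) wraps back around to 0.
 */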
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4 byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff
/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		4

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
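/* Worked example of the sizing chain above, assuming 64-byte cache
 * lines and NET_SKB_PAD == 64 (both are configuration-dependent):
 * for an MTU of 1500, MVPP2_RX_PKT_SIZE(1500) aligns
 * 1500 + 2 + 4 + 14 + 4 = 1524 up to 1536; MVPP2_RX_BUF_SIZE(1536)
 * adds the headroom, giving 1600; and MVPP2_RX_TOTAL_SIZE() adds the
 * skb_shared_info footprint on top of that.
 */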
#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16
/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH   = 1,
	MVPP2_TAG_TYPE_DSA  = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS		8
#define MVPP2_PRS_PORT_MASK		0xff
#define MVPP2_PRS_LU_MASK		0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
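/* Worked example of the byte mapping above: data bytes land at
 * offsets 0, 1, 4, 5, 8, 9, ... of the TCAM entry and their enable
 * bytes at offsets 2, 3, 6, 7, 10, 11, ..., i.e. each 32-bit TCAM
 * word holds two data bytes in its low half and their enable bits in
 * its high half. For instance, MVPP2_PRS_TCAM_DATA_BYTE(2) = 4 and
 * MVPP2_PRS_TCAM_DATA_BYTE_EN(2) = 6.
 */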
#define MVPP2_PRS_TCAM_AI_BYTE		16
#define MVPP2_PRS_TCAM_PORT_BYTE	17
#define MVPP2_PRS_TCAM_LU_BYTE		20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)	((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD		5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};
/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};
/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64
/* BM constants */
#define MVPP2_BM_POOLS_NUM		8
#define MVPP2_BM_LONG_BUF_NUM		1024
#define MVPP2_BM_SHORT_BUF_NUM		2048
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL		3
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * This value assures that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
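/* Worked example of the arithmetic: MVPP2_RX_MAX_PKT_SIZE(512)
 * subtracts NET_SKB_PAD and the skb_shared_info footprint from 512,
 * so once those two are added back at allocation time each
 * short-pool buffer occupies exactly 512 bytes, whatever the two
 * configuration-dependent values happen to be.
 */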
#define MVPP21_ADDR_SPACE_SZ		0
#define MVPP22_ADDR_SPACE_SZ		SZ_64K

#define MVPP2_MAX_CPUS			4

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};
/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *lms_base;
	void __iomem *iface_base;

	/* On PPv2.2, each CPU can access the base register through a
	 * separate address space, each 64 KB apart from each other.
	 */
	void __iomem *cpu_base[MVPP2_MAX_CPUS];

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;
};
struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};
/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	u32 pending_cause_rx;
	struct napi_struct napi;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	struct mvpp2_pcpu_stats __percpu *stats;

	phy_interface_t phy_interface;
	struct device_node *phy_node;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};
/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};
/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};
/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};
/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};
struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t dma;

	/* Size transmitted */
	size_t size;
};
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};
struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};
struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};
struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};
struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* BPPE size in bytes */
	int size_bytes;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};
/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_MAX_TXQ;
#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->cpu_base[0] + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->cpu_base[0] + offset);
}
/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG   (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG     (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG     (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG  (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG  (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG    (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG  (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG  (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG      (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG   (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			       u32 offset, u32 data)
{
	writel(data, priv->cpu_base[cpu] + offset);
}
static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
			     u32 offset)
{
	return readl(priv->cpu_base[cpu] + offset);
}
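/* A minimal illustrative sketch of the indirect access pattern
 * described above (the helper name is hypothetical; the driver
 * open-codes this sequence where needed): the queue is first
 * selected through a given CPU window, and the related register is
 * then accessed through the same window.
 */
static inline u32 mvpp2_percpu_rxq_reg_read(struct mvpp2 *priv, int cpu,
					    int rxq, u32 reg)
{
	/* Select the RXQ, then read one of its related registers,
	 * e.g. MVPP2_RXQ_DESC_ADDR_REG, via the same CPU window.
	 */
	mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_NUM_REG, rxq);
	return mvpp2_percpu_read(priv, cpu, reg);
}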
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.buf_dma_addr;
	else
		return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
}
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}
static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.data_size;
	else
		return tx_desc->pp22.data_size;
}
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}
static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}
static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}
static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}
static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}
static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}
static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}
/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
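/* For example, with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8,
 * txq 2 of port 1 maps to physical TXQ (16 + 1) * 8 + 2 = 138.
 */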
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
1308 static int mvpp2_prs_hw_read(struct mvpp2
*priv
, struct mvpp2_prs_entry
*pe
)
1312 if (pe
->index
> MVPP2_PRS_TCAM_SRAM_SIZE
- 1)
1315 /* Write tcam index - indirect access */
1316 mvpp2_write(priv
, MVPP2_PRS_TCAM_IDX_REG
, pe
->index
);
1318 pe
->tcam
.word
[MVPP2_PRS_TCAM_INV_WORD
] = mvpp2_read(priv
,
1319 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD
));
1320 if (pe
->tcam
.word
[MVPP2_PRS_TCAM_INV_WORD
] & MVPP2_PRS_TCAM_INV_MASK
)
1321 return MVPP2_PRS_TCAM_ENTRY_INVALID
;
1323 for (i
= 0; i
< MVPP2_PRS_TCAM_WORDS
; i
++)
1324 pe
->tcam
.word
[i
] = mvpp2_read(priv
, MVPP2_PRS_TCAM_DATA_REG(i
));
1326 /* Write sram index - indirect access */
1327 mvpp2_write(priv
, MVPP2_PRS_SRAM_IDX_REG
, pe
->index
);
1328 for (i
= 0; i
< MVPP2_PRS_SRAM_WORDS
; i
++)
1329 pe
->sram
.word
[i
] = mvpp2_read(priv
, MVPP2_PRS_SRAM_DATA_REG(i
));
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}
/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}
/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}
/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
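/* The port map is stored inverted in the enable byte: setting a map
 * of 0x03 (ports 0 and 1) via mvpp2_prs_tcam_port_map_set() writes
 * ~0x03 & 0xff = 0xfc there, and mvpp2_prs_tcam_port_map_get()
 * recovers ~0xfc & 0xff = 0x03.
 */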
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}
/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}
/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}
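/* Note that the two data bytes are composed least-significant byte
 * first here, while mvpp2_prs_match_etype() stores the most
 * significant byte at the lower offset; callers comparing a
 * host-order 16-bit value must therefore byte-swap it first, as in
 * mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid)).
 */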
/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}
/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}
/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}
/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}
/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}
/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
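/* Worked example: MVPP2_PRS_SRAM_AI_OFFS is 90, so ai_off = 11 and
 * ai_shift = 2; the eight AI bits are reassembled from the top six
 * bits of sram byte 11 and the bottom two bits of byte 12.
 */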
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
							   (unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */
	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
		mvpp2_prs_sram_shift_set(&pe, shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged, continue checking vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
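
/* Note the asymmetry below: EDSA ethertype entries are created with an
 * empty default port map, while DSA ethertype entries default to all
 * ports (MVPP2_PRS_PORT_MASK); mvpp2_prs_dsa_init() later enables only
 * the DSA ethertype flavour.
 */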

/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged, continue checking vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
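
/* TCAM data is matched byte by byte against the packet, i.e. in network
 * order, while the tpid arguments are CPU-order constants - hence the
 * swab16() in the comparisons below.
 */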

/* Search for existing single/triple vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
						   unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;

		mvpp2_prs_hw_read(priv, pe);
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		if (tid <= tid_aux) {
			ret = -EINVAL;
			goto free_pe;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		mvpp2_prs_match_etype(pe, 0, tpid);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Shift 4 bytes - skip 1 vlan tag */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);

	mvpp2_prs_hw_write(priv, pe);

free_pe:
	kfree(pe);

	return ret;
}
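
/* Each double-vlan pair is identified by an "ai" value stored in the
 * entry; ai 0 is MVPP2_PRS_SINGLE_VLAN_AI, so the search below starts
 * from 1.
 */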

/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}

/* Search for existing double vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
							  unsigned short tpid1,
							  unsigned short tpid2)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);

		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
			&& mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0) {
			ret = ai;
			goto free_pe;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto free_pe;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 8 bytes - skip 2 vlan tags */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(priv, pe);

free_pe:
	kfree(pe);
	return ret;
}
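
/* Two TCAM entries are installed per L4 protocol below: one that matches
 * any fragment and adds MVPP2_PRS_RI_IP_FRAG_MASK to the result info, and
 * one that additionally requires the fragment-related header bytes at
 * this lookup offset to be zero, so it only matches unfragmented packets.
 */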

/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
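
/* For IPv6 the protocol comes from the next-header byte, matched at TCAM
 * data byte 0 of the IP6 lookup; the L4 offset below then appears to be
 * set relative to the remaining fixed part of the header.
 */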

/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}

/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}
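
/* Every packet starts with a two-byte Marvell Header (MVPP2_MH_SIZE),
 * which the default MH entry below skips before handing over to the MAC
 * lookup.
 */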

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Set default entries (placeholder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* placeholders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
}

/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* Non-tagged EDSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* Non-tagged DSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Non-tagged EDSA ethertype entry - placeholder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - placeholder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* Non-tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
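
/* Besides programming the TCAM, the ethertype entries below record their
 * udf type, result info and a "finish" flag in the shadow table, so the
 * software copy of the parser state stays in sync with the hardware.
 */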

/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88A8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88A8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
					      MVPP2_PRS_DBL_VLANS_MAX,
					      GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x8100, 0x88A8 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
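
/* The L2 PPPoE entry created in mvpp2_prs_etype_init() has already
 * skipped the PPPoE header (MVPP2_PPPOE_HDR_SIZE), so the entries below
 * dispatch on the two-byte PPP protocol field (PPP_IP / PPP_IPV6).
 */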

/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is a case similar to 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;
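
	/* The entry below matches a hop limit of zero (TCAM data byte 1
	 * under MVPP2_PRS_IPV6_HOP_MASK) and marks such packets to be
	 * dropped.
	 */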
	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}

/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}
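
/* New per-address entries below are allocated beneath the first MAC range
 * entry so that exact-match entries take precedence over range entries in
 * the TCAM lookup.
 */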

/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
						       MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (is_multicast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}

static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}

/* Delete all port's multicast simple (not range) entries */
static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
{
	struct mvpp2_prs_entry pe;
	int index, tid;

	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		/* Only simple mac entries */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
			/* Delete this entry */
			mvpp2_prs_mac_da_accept(priv, port, da, false);
	}
}

static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}

/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry exists - create one */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}

/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
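
/* Each lookup ID exists in two copies ("ways"); the init below clears
 * both ways of every lookup table entry.
 */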

/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}

static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}

static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	int cpu = get_cpu();

	*dma_addr = mvpp2_percpu_read(priv, cpu,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
			  bm_pool->virt_addr, bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}
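
/* PPv2.1 and PPv2.2 use different widths for the pool id fields in
 * MVPP2_RXQ_CONFIG_REG, hence the hw_version checks in the two helpers
 * below.
 */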

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}
/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}
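/* A BM cookie is a u32 of packed metadata that rides along with a
 * buffer and is echoed back by the hardware in the Rx descriptor's
 * cookie field. The helper above only rewrites the 8-bit pool-id lane
 * at MVPP2_BM_COOKIE_POOL_OFFS, leaving the other lanes of the cookie
 * untouched.
 */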
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	int cpu = get_cpu();

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_percpu_write(port->priv, cpu,
				   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_percpu_write(port->priv, cpu,
			   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_percpu_write(port->priv, cpu,
			   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	put_cpu();
}

/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, int pool,
			      dma_addr_t dma_addr,
			      phys_addr_t phys_addr)
{
	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "%s pool %d: %d of %d buffers added\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, i, buf_num);
	return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					  MVPP2_BM_SWF_LONG,
					  port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
					  MVPP2_BM_SWF_SHORT,
					  MVPP2_BM_SHORT_PKT_SIZE);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_bm_pool *port_pool = port->pool_long;
	int num, pkts_num = port_pool->buf_num;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* Update BM pool with new buffer size */
	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
	if (port_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
		return -EIO;
	}

	port_pool->pkt_size = pkt_size;
	port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
		MVPP2_SKB_SHINFO_SIZE;
	num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
	if (num != pkts_num) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     port_pool->id, num, pkts_num);
		return -EIO;
	}

	mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
	dev->mtu = mtu;
	netdev_update_features(dev);
	return 0;
}
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}

/* Mask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id),
			   (MVPP2_CAUSE_MISC_SUM_MASK |
			    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
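/* Interrupt handling is thus two-level: MVPP2_ISR_ENABLE_REG gates
 * delivery of the port interrupt per CPU, while the per-CPU
 * MVPP2_ISR_RX_TX_MASK_REG selects which cause bits (Rx queues, misc)
 * may assert it. The ISR/NAPI path toggles the former; open/stop
 * toggle the latter via on_each_cpu().
 */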
/* Port configuration routines */

static void mvpp22_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_CTRL3_REG);
		val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
		val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
		writel(val, port->base + MVPP22_XLG_CTRL3_REG);
	}

	val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
	if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
		val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
	else
		val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
	val &= ~MVPP22_CTRL4_DP_CLK_SEL;
	val |= MVPP22_CTRL4_SYNC_BYPASS;
	val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
}

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	if (port->priv->hw_version == MVPP22)
		mvpp22_port_mii_set(port);

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}
static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
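/* Assuming the usual Marvell GMAC encoding, the MAX_RX_SIZE field
 * counts in units of two bytes, which is why the size programmed above
 * is halved after subtracting the 2-byte Marvell header (MVPP2_MH_SIZE)
 * that precedes every received frame.
 */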
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Configure port to loopback if needed */
		if (port->flags & MVPP2_F_LOOPBACK)
			mvpp2_port_loopback_set(port);

		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap = 0;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay = 0;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > aggr_txq->size) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
	}

	if ((aggr_txq->count + num) > aggr_txq->size)
		return -ENOMEM;

	return 0;
}
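/* The count cached in aggr_txq->count can be stale: the hardware
 * consumes aggregated descriptors on its own, so re-reading the per-CPU
 * AGGR_TXQ_STATUS register above may reveal freshly freed slots without
 * any software-side completion having run in between.
 */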
/* Reserved Tx descriptors allocation request */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;
	int cpu = smp_processor_id();

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}

/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	    (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor count has been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}
/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == swab16(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}
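/* Example (illustrative values): a TCP segment in an IPv4 packet behind
 * a standard 14-byte Ethernet header and a 20-byte IP header would be
 * described as l3_offs = 14 and ip_hdr_len = 5 (the IHL value, i.e. in
 * 32-bit words, as the caller passes it in), with both the IPv4 and L4
 * checksum-generation bits left enabled in the resulting command.
 */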
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_percpu_read(port->priv, smp_processor_id(),
				MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_percpu_read(port->priv, smp_processor_id(),
				  MVPP2_TXQ_SENT_REG(id));
	}
}
/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	int cpu = get_cpu();

	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
			   rxq->pkts_coal);

	put_cpu();
}

static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
	u64 tmp = (u64)clk_hz * usec;

	do_div(tmp, USEC_PER_SEC);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
{
	u64 tmp = (u64)cycles * USEC_PER_SEC;

	do_div(tmp, clk_hz);

	return tmp > U32_MAX ? U32_MAX : tmp;
}
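/* Worked example (values for illustration only): with tclk running at
 * 250 MHz, a 100 usec coalescing delay converts to
 * 250000000 * 100 / 1000000 = 25000 clock cycles, and converting 25000
 * cycles back yields the same 100 usec, so the clamp-and-retry logic
 * below is stable.
 */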
/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);

	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
		rxq->time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
				 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}
/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
}

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = this_cpu_ptr(txq->pcpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}
/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
				desc_num * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;
	int cpu;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
	put_cpu();

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}
/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
		int pool;

		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;

		mvpp2_pool_refill(port, pool,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	int cpu;

	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs, rxq->descs_dma);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be contiguous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
	put_cpu();

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
						sizeof(*txq_pcpu->buffs),
						GFP_KERNEL);
		if (!txq_pcpu->buffs)
			goto cleanup;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}

	return 0;

cleanup:
	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);
	}

	dma_free_coherent(port->dev->dev.parent,
			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
			  txq->descs, txq->descs_dma);

	return -ENOMEM;
}
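/* Illustration of the prefetch-buffer math above (assuming
 * MVPP2_MAX_TXQ == 8 and the desc_per_txq of 16 set in this function):
 * port 1, logical queue 2 gets desc = 1 * 8 * 16 + 2 * 16 = 160, so
 * every (port, txq) pair owns a disjoint 16-descriptor window in the
 * prefetch buffer, with the threshold set at half a window.
 */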
/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_percpu_read(port->priv, cpu,
					    MVPP2_TXQ_PENDING_REG);
		pending &= MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
	put_cpu();

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}
/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}
/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}
/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;

	mvpp2_interrupts_disable(port);

	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}
/* Adjust link */
static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed  = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
		phy_print_status(phydev);
	}
}
static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << txq_number) - 1;
	tx_todo = mvpp2_tx_done(port, cause);

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
			 struct mvpp2_port_pcpu,
			 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}
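/* Deferred Tx completion thus runs in three stages: mvpp2_timer_set()
 * arms a pinned hrtimer; its callback above only schedules the tasklet,
 * since hrtimer callbacks run in hard-irq context where reaping skbs
 * would be too heavy; and the tasklet (mvpp2_tx_proc_cb) frees the
 * completed descriptors and re-arms the timer if work remains.
 */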
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}

/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool, int pool)
{
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
			      GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	mvpp2_pool_refill(port, pool, dma_addr, phys_addr);

	return 0;
}

/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
		    struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * comprised by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
			continue;
		}

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_done;
}
static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);
	dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
			 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}

/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, frag->size);

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_offset_set(port, tx_desc,
					buf_dma_addr & MVPP2_TX_DESC_ALIGN);
		mvpp2_txdesc_dma_addr_set(port, tx_desc,
					  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;

cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}
/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_offset_set(port, tx_desc,
				buf_dma_addr & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
			goto out;
		}
	}

	txq_pcpu->reserved_num -= frags;
	txq_pcpu->count += frags;
	aggr_txq->count += frags;

	/* Enable transmit */
	wmb();
	mvpp2_aggr_txq_pend_desc_add(port, frags);

	if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netif_tx_stop_queue(nq);
	}
out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}
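/* Stop/wake hysteresis: the queue is stopped as soon as fewer than
 * MAX_SKB_FRAGS + 1 descriptors remain, i.e. while a worst-case fully
 * fragmented skb could still have been accepted, and mvpp2_txq_done()
 * wakes it again once at least that much headroom is available.
 */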
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}

static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	int cpu = smp_processor_id();

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_percpu_read(port->priv, cpu,
					MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
	cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;

	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_percpu_write(port->priv, cpu,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	/* Process RX packets */
	cause_rx |= port->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete_done(napi, rx_done);

		mvpp2_interrupts_enable(port);
	}
	port->pending_cause_rx = cause_rx;
	return rx_done;
}
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	mvpp2_gmac_max_rx_size_set(port);
	mvpp2_txp_max_tx_size_set(port);

	napi_enable(&port->napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	mvpp2_port_enable(port);
	phy_start(ndev->phydev);
	netif_tx_start_all_queues(port->dev);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mdelay(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	napi_disable(&port->napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	phy_stop(ndev->phydev);
}
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD)
		new_rx_pending = MVPP2_MAX_RXD;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD)
		new_tx_pending = MVPP2_MAX_TXD;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
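/* Worked example (illustrative values): a requested rx_pending of 100
 * is rounded up to ALIGN(100, 16) = 112, and a tx_pending of 100 up to
 * ALIGN(100, 32) = 128; the adjusted values are written back into the
 * ethtool_ringparam so userspace sees what was actually applied.
 */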
static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}

static int mvpp2_phy_connect(struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
				 port->phy_interface);
	if (!phy_dev) {
		netdev_err(port->dev, "cannot connect to phy\n");
		return -ENODEV;
	}
	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	port->link    = 0;
	port->duplex  = 0;
	port->speed   = 0;

	return 0;
}

static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	phy_disconnect(ndev->phydev);
}
5928 static int mvpp2_open(struct net_device
*dev
)
5930 struct mvpp2_port
*port
= netdev_priv(dev
);
5931 unsigned char mac_bcast
[ETH_ALEN
] = {
5932 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5935 err
= mvpp2_prs_mac_da_accept(port
->priv
, port
->id
, mac_bcast
, true);
5937 netdev_err(dev
, "mvpp2_prs_mac_da_accept BC failed\n");
5940 err
= mvpp2_prs_mac_da_accept(port
->priv
, port
->id
,
5941 dev
->dev_addr
, true);
5943 netdev_err(dev
, "mvpp2_prs_mac_da_accept MC failed\n");
5946 err
= mvpp2_prs_tag_mode_set(port
->priv
, port
->id
, MVPP2_TAG_TYPE_MH
);
5948 netdev_err(dev
, "mvpp2_prs_tag_mode_set failed\n");
5951 err
= mvpp2_prs_def_flow(port
);
5953 netdev_err(dev
, "mvpp2_prs_def_flow failed\n");
5957 /* Allocate the Rx/Tx queues */
5958 err
= mvpp2_setup_rxqs(port
);
5960 netdev_err(port
->dev
, "cannot allocate Rx queues\n");
5964 err
= mvpp2_setup_txqs(port
);
5966 netdev_err(port
->dev
, "cannot allocate Tx queues\n");
5967 goto err_cleanup_rxqs
;
5970 err
= request_irq(port
->irq
, mvpp2_isr
, 0, dev
->name
, port
);
5972 netdev_err(port
->dev
, "cannot request IRQ %d\n", port
->irq
);
5973 goto err_cleanup_txqs
;
5976 /* In default link is down */
5977 netif_carrier_off(port
->dev
);
5979 err
= mvpp2_phy_connect(port
);
5983 /* Unmask interrupts on all CPUs */
5984 on_each_cpu(mvpp2_interrupts_unmask
, port
, 1);
5986 mvpp2_start_dev(port
);
5991 free_irq(port
->irq
, port
);
5993 mvpp2_cleanup_txqs(port
);
5995 mvpp2_cleanup_rxqs(port
);
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	int cpu;

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);

	free_irq(port->irq, port);
	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_cancel(&port_pcpu->tx_done_timer);
		port_pcpu->timer_scheduled = false;
		tasklet_kill(&port_pcpu->tx_done_tasklet);
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	return 0;
}
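/* ndo_set_rx_mode: rebuild the parser's MAC filter from scratch on every
 * call. Promiscuous and all-multicast modes map onto dedicated parser
 * entries; individual multicast subscriptions are then re-added one by
 * one. Per-address entries are only useful when IFF_ALLMULTI is off,
 * since the MC_ALL/MC_IP6 entries already accept all multicast traffic
 * in that mode.
 */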
static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	struct netdev_hw_addr *ha;
	int id = port->id;
	bool allmulti = dev->flags & IFF_ALLMULTI;

	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

	/* Remove all of this port's multicast entries */
	mvpp2_prs_mcast_del_all(priv, id);

	/* Add per-address entries only when not in allmulti mode; with
	 * IFF_ALLMULTI set, the MC_ALL/MC_IP6 entries above already accept
	 * all multicast traffic.
	 */
	if (!allmulti && !netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(ha, dev)
			mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
	}
}
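/* ndo_set_mac_address follows a try-then-roll-back pattern: attempt to
 * install the new DA in the parser and, if that fails, re-install
 * dev->dev_addr so the port keeps filtering on its old address instead
 * of being left with no unicast entry at all.
 */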
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser to accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;

error:
	netdev_err(dev, "failed to change MAC address\n");
	return err;
}
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

error:
	netdev_err(dev, "failed to change MTU\n");
	return err;
}
static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret;

	if (!dev->phydev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}

/* Ethtool methods */
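/* Coalescing is tracked per queue, but ethtool exposes a single knob per
 * direction. set_coalesce therefore fans one value out to every Rx/Tx
 * queue, and get_coalesce (below) just reports queue 0, implicitly
 * assuming all queues stay in sync.
 */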
/* Set interrupt coalescing for ethtool */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
	}

	return 0;
}
/* Get interrupt coalescing for ethtool */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	return 0;
}
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}
static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD;
	ring->tx_max_pending = MVPP2_MAX_TXD;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}
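/* Changing ring sizes on a live interface requires a full queue teardown
 * and re-allocation. The error path tries progressively harder to keep
 * the port usable: if re-allocation with the new size fails, it retries
 * with the previous size before giving up entirely.
 */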
static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters\n");
	return err;
}
static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset	= phy_ethtool_nway_reset,
	.get_link	= ethtool_op_get_link,
	.set_coalesce	= mvpp2_ethtool_set_coalesce,
	.get_coalesce	= mvpp2_ethtool_get_coalesce,
	.get_drvinfo	= mvpp2_ethtool_get_drvinfo,
	.get_ringparam	= mvpp2_ethtool_get_ringparam,
	.set_ringparam	= mvpp2_ethtool_set_ringparam,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
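/* Note on queue numbering: each port owns a fixed, predefined slice of
 * the controller's physical queues. Tx queue `queue` of a port maps to
 * physical queue mvpp2_txq_phys(port->id, queue), and the port's Rx
 * queues form a contiguous block starting at port->first_rxq; logic_rxq
 * is the port-local index, rxq->id the global one.
 */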
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Configure Rx queue group interrupt for this port */
	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    rxq_number);
	} else {
		u32 val;

		val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < txq_number; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
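/* Port probing walks the port's DT node: the "phy" phandle, "phy-mode"
 * and "port-id" properties (plus "gop-port-id" on PPv2.2) are mandatory.
 * The MAC address is chosen by precedence: a valid address from the
 * device tree wins, then (on PPv2.1) whatever the hardware registers
 * hold, and a random address is the last resort.
 */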
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct device_node *port_node,
			    struct mvpp2 *priv)
{
	struct device_node *phy_node;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct net_device *dev;
	struct resource *res;
	const char *dt_mac_addr;
	const char *mac_from;
	/* Zero-initialized: on PPv2.2 there is no register to read a MAC
	 * address back from, so this buffer must not be left uninitialized
	 * before the is_valid_ether_addr() check below.
	 */
	char hw_mac_addr[ETH_ALEN] = { 0 };
	u32 id;
	int features;
	int phy_mode;
	int err, i, cpu;

	dev = alloc_etherdev_mqs(sizeof(*port), txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	phy_node = of_parse_phandle(port_node, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "missing phy\n");
		err = -ENODEV;
		goto err_free_netdev;
	}

	phy_mode = of_get_phy_mode(port_node);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (of_property_read_u32(port_node, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);

	port->irq = irq_of_parse_and_map(port_node, 0);
	if (port->irq <= 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	if (of_property_read_bool(port_node, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->priv = priv;
	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * rxq_number;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->phy_node = phy_node;
	port->phy_interface = phy_mode;

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}
	} else {
		if (of_property_read_u32(port_node, "gop-port-id",
					 &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_free_irq;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
	}

	/* Alloc per-cpu stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	dt_mac_addr = of_get_mac_address(port_node);
	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
		mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
	} else {
		if (priv->hw_version == MVPP21)
			mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;
	port->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}
	mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);

	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);

	mvpp2_port_reset(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_PINNED);
		port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
		port_pcpu->timer_scheduled = false;

		tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
			     (unsigned long)dev);
	}

	netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
	features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->vlan_features |= features;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[id] = port;
	return 0;

err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	irq_dispose_mapping(port->irq);
err_free_netdev:
	of_node_put(phy_node);
	free_netdev(dev);
	return err;
}
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	of_node_put(port->phy_node);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
	irq_dispose_mapping(port->irq);
	free_netdev(port->dev);
}
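/* The MBUS decoding windows tell the controller's DMA engines how to
 * reach each DRAM chip-select: every enabled window gets the CS base
 * address, target id and attributes, with a 64 KiB granularity (only the
 * upper 16 bits of base and size are programmed).
 */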
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
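/* PPv2.2 only: program the AXI attributes (cache and domain codes) used
 * for the controller's DMA accesses. Descriptor and buffer traffic is
 * marked cacheable/outer-domain, presumably so it can be snooped by the
 * CPU caches, while "normal" accesses use a non-cacheable, system-domain
 * code.
 */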
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints */
	if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx FIFO init */
	mvpp2_rx_fifo_init(priv);

	/* Reset Rx queue group interrupt configuration */
	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		if (priv->hw_version == MVPP21) {
			mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i),
				    rxq_number);
		} else {
			val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
			mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

			val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
			mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
		}
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int port_count, cpu;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->hw_version =
		(unsigned long)of_device_get_match_data(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	/* Each CPU gets its own window into the register file, offset by
	 * a per-CPU address-space stride that differs between PPv2.1 and
	 * PPv2.2.
	 */
	for_each_present_cpu(cpu) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->cpu_base[cpu] = base + cpu * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	if (priv->hw_version == MVPP22) {
		priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
		if (IS_ERR(priv->mg_clk)) {
			err = PTR_ERR(priv->mg_clk);
			goto err_gop_clk;
		}

		err = clk_prepare_enable(priv->mg_clk);
		if (err < 0)
			goto err_gop_clk;
	}

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
		if (err)
			goto err_mg_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_mg_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_mg_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_mg_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(*priv->port_list),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_mg_clk;
	}

	/* Initialize ports */
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv);
		if (err < 0)
			goto err_mg_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);
MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");