2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
4 * Copyright (C) 2014 Marvell
6 * Marcin Wojtas <mw@semihalf.com>
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
13 #include <linux/kernel.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/platform_device.h>
17 #include <linux/skbuff.h>
18 #include <linux/inetdevice.h>
19 #include <linux/mbus.h>
20 #include <linux/module.h>
21 #include <linux/interrupt.h>
22 #include <linux/cpumask.h>
24 #include <linux/of_irq.h>
25 #include <linux/of_mdio.h>
26 #include <linux/of_net.h>
27 #include <linux/of_address.h>
28 #include <linux/of_device.h>
29 #include <linux/phy.h>
30 #include <linux/clk.h>
31 #include <linux/hrtimer.h>
32 #include <linux/ktime.h>
33 #include <uapi/linux/ppp_defs.h>
37 /* RX Fifo Registers */
38 #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
39 #define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
40 #define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
41 #define MVPP2_RX_FIFO_INIT_REG 0x64
43 /* RX DMA Top Registers */
44 #define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
45 #define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
46 #define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
47 #define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
48 #define MVPP2_POOL_BUF_SIZE_OFFSET 5
49 #define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
50 #define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
51 #define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
52 #define MVPP2_RXQ_POOL_SHORT_OFFS 20
53 #define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
54 #define MVPP2_RXQ_POOL_LONG_OFFS 24
55 #define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
56 #define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
57 #define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
58 #define MVPP2_RXQ_DISABLE_MASK BIT(31)
60 /* Parser Registers */
61 #define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
62 #define MVPP2_PRS_PORT_LU_MAX 0xf
63 #define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
64 #define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
65 #define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
66 #define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
67 #define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
68 #define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
69 #define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
70 #define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
71 #define MVPP2_PRS_TCAM_IDX_REG 0x1100
72 #define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
73 #define MVPP2_PRS_TCAM_INV_MASK BIT(31)
74 #define MVPP2_PRS_SRAM_IDX_REG 0x1200
75 #define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
76 #define MVPP2_PRS_TCAM_CTRL_REG 0x1230
77 #define MVPP2_PRS_TCAM_EN_MASK BIT(0)
79 /* Classifier Registers */
80 #define MVPP2_CLS_MODE_REG 0x1800
81 #define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
82 #define MVPP2_CLS_PORT_WAY_REG 0x1810
83 #define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
84 #define MVPP2_CLS_LKP_INDEX_REG 0x1814
85 #define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
86 #define MVPP2_CLS_LKP_TBL_REG 0x1818
87 #define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
88 #define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
89 #define MVPP2_CLS_FLOW_INDEX_REG 0x1820
90 #define MVPP2_CLS_FLOW_TBL0_REG 0x1824
91 #define MVPP2_CLS_FLOW_TBL1_REG 0x1828
92 #define MVPP2_CLS_FLOW_TBL2_REG 0x182c
93 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
94 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
95 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
96 #define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
97 #define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
98 #define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
100 /* Descriptor Manager Top Registers */
101 #define MVPP2_RXQ_NUM_REG 0x2040
102 #define MVPP2_RXQ_DESC_ADDR_REG 0x2044
103 #define MVPP2_RXQ_DESC_SIZE_REG 0x2048
104 #define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
105 #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
106 #define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
107 #define MVPP2_RXQ_NUM_NEW_OFFSET 16
108 #define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
109 #define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
110 #define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
111 #define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
112 #define MVPP2_RXQ_THRESH_REG 0x204c
113 #define MVPP2_OCCUPIED_THRESH_OFFSET 0
114 #define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
115 #define MVPP2_RXQ_INDEX_REG 0x2050
116 #define MVPP2_TXQ_NUM_REG 0x2080
117 #define MVPP2_TXQ_DESC_ADDR_REG 0x2084
118 #define MVPP2_TXQ_DESC_SIZE_REG 0x2088
119 #define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
120 #define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
121 #define MVPP2_TXQ_INDEX_REG 0x2098
122 #define MVPP2_TXQ_PREF_BUF_REG 0x209c
123 #define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
124 #define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
125 #define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
126 #define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
127 #define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
128 #define MVPP2_TXQ_PENDING_REG 0x20a0
129 #define MVPP2_TXQ_PENDING_MASK 0x3fff
130 #define MVPP2_TXQ_INT_STATUS_REG 0x20a4
131 #define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
132 #define MVPP2_TRANSMITTED_COUNT_OFFSET 16
133 #define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
134 #define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
135 #define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
136 #define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
137 #define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
138 #define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
139 #define MVPP2_TXQ_RSVD_CLR_OFFSET 16
140 #define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
141 #define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
142 #define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
143 #define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
144 #define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
145 #define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
147 /* MBUS bridge registers */
148 #define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
149 #define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
150 #define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
151 #define MVPP2_BASE_ADDR_ENABLE 0x4060
153 /* Interrupt Cause and Mask registers */
154 #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
155 #define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
156 #define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
157 #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
158 #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
159 #define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
160 #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
161 #define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
162 #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
163 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
164 #define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
165 #define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
166 #define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
167 #define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
168 #define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
169 #define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
170 #define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
171 #define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
172 #define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
173 #define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
174 #define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
176 /* Buffer Manager registers */
177 #define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
178 #define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
179 #define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
180 #define MVPP2_BM_POOL_SIZE_MASK 0xfff0
181 #define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
182 #define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
183 #define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
184 #define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
185 #define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
186 #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
187 #define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
188 #define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
189 #define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
190 #define MVPP2_BM_START_MASK BIT(0)
191 #define MVPP2_BM_STOP_MASK BIT(1)
192 #define MVPP2_BM_STATE_MASK BIT(4)
193 #define MVPP2_BM_LOW_THRESH_OFFS 8
194 #define MVPP2_BM_LOW_THRESH_MASK 0x7f00
195 #define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
196 MVPP2_BM_LOW_THRESH_OFFS)
197 #define MVPP2_BM_HIGH_THRESH_OFFS 16
198 #define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
199 #define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
200 MVPP2_BM_HIGH_THRESH_OFFS)
201 #define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
202 #define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
203 #define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
204 #define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
205 #define MVPP2_BM_BPPE_FULL_MASK BIT(3)
206 #define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
207 #define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
208 #define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
209 #define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
210 #define MVPP2_BM_VIRT_ALLOC_REG 0x6440
211 #define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
212 #define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
213 #define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
214 #define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
215 #define MVPP2_BM_VIRT_RLS_REG 0x64c0
217 /* TX Scheduler registers */
218 #define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
219 #define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
220 #define MVPP2_TXP_SCHED_ENQ_MASK 0xff
221 #define MVPP2_TXP_SCHED_DISQ_OFFSET 8
222 #define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
223 #define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
224 #define MVPP2_TXP_SCHED_MTU_REG 0x801c
225 #define MVPP2_TXP_MTU_MAX 0x7FFFF
226 #define MVPP2_TXP_SCHED_REFILL_REG 0x8020
227 #define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
228 #define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
229 #define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
230 #define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
231 #define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
232 #define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
233 #define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
234 #define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
235 #define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
236 #define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
237 #define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
238 #define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
239 #define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
241 /* TX general registers */
242 #define MVPP2_TX_SNOOP_REG 0x8800
243 #define MVPP2_TX_PORT_FLUSH_REG 0x8810
244 #define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
247 #define MVPP2_SRC_ADDR_MIDDLE 0x24
248 #define MVPP2_SRC_ADDR_HIGH 0x28
249 #define MVPP2_PHY_AN_CFG0_REG 0x34
250 #define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
251 #define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
252 #define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
254 /* Per-port registers */
255 #define MVPP2_GMAC_CTRL_0_REG 0x0
256 #define MVPP2_GMAC_PORT_EN_MASK BIT(0)
257 #define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
258 #define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
259 #define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
260 #define MVPP2_GMAC_CTRL_1_REG 0x4
261 #define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
262 #define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
263 #define MVPP2_GMAC_PCS_LB_EN_BIT 6
264 #define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
265 #define MVPP2_GMAC_SA_LOW_OFFS 7
266 #define MVPP2_GMAC_CTRL_2_REG 0x8
267 #define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
268 #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
269 #define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
270 #define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
271 #define MVPP2_GMAC_AUTONEG_CONFIG 0xc
272 #define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
273 #define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
274 #define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
275 #define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
276 #define MVPP2_GMAC_AN_SPEED_EN BIT(7)
277 #define MVPP2_GMAC_FC_ADV_EN BIT(9)
278 #define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
279 #define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
280 #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
281 #define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
282 #define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
283 #define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
284 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
286 #define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
288 /* Descriptor ring Macros */
289 #define MVPP2_QUEUE_NEXT_DESC(q, index) \
290 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
292 /* Various constants */
295 #define MVPP2_TXDONE_COAL_PKTS_THRESH 15
296 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
297 #define MVPP2_RX_COAL_PKTS 32
298 #define MVPP2_RX_COAL_USEC 100
300 /* The two bytes Marvell header. Either contains a special value used
301 * by Marvell switches when a specific hardware mode is enabled (not
302 * supported by this driver) or is filled automatically by zeroes on
303 * the RX side. Those two bytes being at the front of the Ethernet
304 * header, they allow to have the IP header aligned on a 4 bytes
305 * boundary automatically: the hardware skips those two bytes on its
308 #define MVPP2_MH_SIZE 2
309 #define MVPP2_ETH_TYPE_LEN 2
310 #define MVPP2_PPPOE_HDR_SIZE 8
311 #define MVPP2_VLAN_TAG_LEN 4
313 /* Lbtd 802.3 type */
314 #define MVPP2_IP_LBDT_TYPE 0xfffa
316 #define MVPP2_TX_CSUM_MAX_SIZE 9800
318 /* Timeout constants */
319 #define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
320 #define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
322 #define MVPP2_TX_MTU_MAX 0x7ffff
324 /* Maximum number of T-CONTs of PON port */
325 #define MVPP2_MAX_TCONT 16
327 /* Maximum number of supported ports */
328 #define MVPP2_MAX_PORTS 4
330 /* Maximum number of TXQs used by single port */
331 #define MVPP2_MAX_TXQ 8
333 /* Maximum number of RXQs used by single port */
334 #define MVPP2_MAX_RXQ 8
336 /* Dfault number of RXQs in use */
337 #define MVPP2_DEFAULT_RXQ 4
339 /* Total number of RXQs available to all ports */
340 #define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
342 /* Max number of Rx descriptors */
343 #define MVPP2_MAX_RXD 128
345 /* Max number of Tx descriptors */
346 #define MVPP2_MAX_TXD 1024
348 /* Amount of Tx descriptors that can be reserved at once by CPU */
349 #define MVPP2_CPU_DESC_CHUNK 64
351 /* Max number of Tx descriptors in each aggregated queue */
352 #define MVPP2_AGGR_TXQ_SIZE 256
354 /* Descriptor aligned size */
355 #define MVPP2_DESC_ALIGNED_SIZE 32
357 /* Descriptor alignment mask */
358 #define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
360 /* RX FIFO constants */
361 #define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
362 #define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
363 #define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
365 /* RX buffer constants */
366 #define MVPP2_SKB_SHINFO_SIZE \
367 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
369 #define MVPP2_RX_PKT_SIZE(mtu) \
370 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
371 ETH_HLEN + ETH_FCS_LEN, cache_line_size())
373 #define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
374 #define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
375 #define MVPP2_RX_MAX_PKT_SIZE(total_size) \
376 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
378 #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
380 /* IPv6 max L3 address size */
381 #define MVPP2_MAX_L3_ADDR_SIZE 16
384 #define MVPP2_F_LOOPBACK BIT(0)
386 /* Marvell tag types */
387 enum mvpp2_tag_type
{
388 MVPP2_TAG_TYPE_NONE
= 0,
389 MVPP2_TAG_TYPE_MH
= 1,
390 MVPP2_TAG_TYPE_DSA
= 2,
391 MVPP2_TAG_TYPE_EDSA
= 3,
392 MVPP2_TAG_TYPE_VLAN
= 4,
393 MVPP2_TAG_TYPE_LAST
= 5
396 /* Parser constants */
397 #define MVPP2_PRS_TCAM_SRAM_SIZE 256
398 #define MVPP2_PRS_TCAM_WORDS 6
399 #define MVPP2_PRS_SRAM_WORDS 4
400 #define MVPP2_PRS_FLOW_ID_SIZE 64
401 #define MVPP2_PRS_FLOW_ID_MASK 0x3f
402 #define MVPP2_PRS_TCAM_ENTRY_INVALID 1
403 #define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
404 #define MVPP2_PRS_IPV4_HEAD 0x40
405 #define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
406 #define MVPP2_PRS_IPV4_MC 0xe0
407 #define MVPP2_PRS_IPV4_MC_MASK 0xf0
408 #define MVPP2_PRS_IPV4_BC_MASK 0xff
409 #define MVPP2_PRS_IPV4_IHL 0x5
410 #define MVPP2_PRS_IPV4_IHL_MASK 0xf
411 #define MVPP2_PRS_IPV6_MC 0xff
412 #define MVPP2_PRS_IPV6_MC_MASK 0xff
413 #define MVPP2_PRS_IPV6_HOP_MASK 0xff
414 #define MVPP2_PRS_TCAM_PROTO_MASK 0xff
415 #define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
416 #define MVPP2_PRS_DBL_VLANS_MAX 100
419 * - lookup ID - 4 bits
421 * - additional information - 1 byte
422 * - header data - 8 bytes
423 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
425 #define MVPP2_PRS_AI_BITS 8
426 #define MVPP2_PRS_PORT_MASK 0xff
427 #define MVPP2_PRS_LU_MASK 0xf
428 #define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
429 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
430 #define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
431 (((offs) * 2) - ((offs) % 2) + 2)
432 #define MVPP2_PRS_TCAM_AI_BYTE 16
433 #define MVPP2_PRS_TCAM_PORT_BYTE 17
434 #define MVPP2_PRS_TCAM_LU_BYTE 20
435 #define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
436 #define MVPP2_PRS_TCAM_INV_WORD 5
437 /* Tcam entries ID */
438 #define MVPP2_PE_DROP_ALL 0
439 #define MVPP2_PE_FIRST_FREE_TID 1
440 #define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
441 #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
442 #define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
443 #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
444 #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
445 #define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
446 #define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
447 #define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
448 #define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
449 #define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
450 #define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
451 #define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
452 #define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
453 #define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
454 #define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
455 #define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
456 #define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
457 #define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
458 #define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
459 #define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
460 #define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
461 #define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
462 #define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
463 #define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
464 #define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
467 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
469 #define MVPP2_PRS_SRAM_RI_OFFS 0
470 #define MVPP2_PRS_SRAM_RI_WORD 0
471 #define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
472 #define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
473 #define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
474 #define MVPP2_PRS_SRAM_SHIFT_OFFS 64
475 #define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
476 #define MVPP2_PRS_SRAM_UDF_OFFS 73
477 #define MVPP2_PRS_SRAM_UDF_BITS 8
478 #define MVPP2_PRS_SRAM_UDF_MASK 0xff
479 #define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
480 #define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
481 #define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
482 #define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
483 #define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
484 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
485 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
486 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
487 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
488 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
489 #define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
490 #define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
491 #define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
492 #define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
493 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
494 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
495 #define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
496 #define MVPP2_PRS_SRAM_AI_OFFS 90
497 #define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
498 #define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
499 #define MVPP2_PRS_SRAM_AI_MASK 0xff
500 #define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
501 #define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
502 #define MVPP2_PRS_SRAM_LU_DONE_BIT 110
503 #define MVPP2_PRS_SRAM_LU_GEN_BIT 111
505 /* Sram result info bits assignment */
506 #define MVPP2_PRS_RI_MAC_ME_MASK 0x1
507 #define MVPP2_PRS_RI_DSA_MASK 0x2
508 #define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
509 #define MVPP2_PRS_RI_VLAN_NONE 0x0
510 #define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
511 #define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
512 #define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
513 #define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
514 #define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
515 #define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
516 #define MVPP2_PRS_RI_L2_UCAST 0x0
517 #define MVPP2_PRS_RI_L2_MCAST BIT(9)
518 #define MVPP2_PRS_RI_L2_BCAST BIT(10)
519 #define MVPP2_PRS_RI_PPPOE_MASK 0x800
520 #define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
521 #define MVPP2_PRS_RI_L3_UN 0x0
522 #define MVPP2_PRS_RI_L3_IP4 BIT(12)
523 #define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
524 #define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
525 #define MVPP2_PRS_RI_L3_IP6 BIT(14)
526 #define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
527 #define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
528 #define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
529 #define MVPP2_PRS_RI_L3_UCAST 0x0
530 #define MVPP2_PRS_RI_L3_MCAST BIT(15)
531 #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
532 #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
533 #define MVPP2_PRS_RI_UDF3_MASK 0x300000
534 #define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
535 #define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
536 #define MVPP2_PRS_RI_L4_TCP BIT(22)
537 #define MVPP2_PRS_RI_L4_UDP BIT(23)
538 #define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
539 #define MVPP2_PRS_RI_UDF7_MASK 0x60000000
540 #define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
541 #define MVPP2_PRS_RI_DROP_MASK 0x80000000
543 /* Sram additional info bits assignment */
544 #define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
545 #define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
546 #define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
547 #define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
548 #define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
549 #define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
550 #define MVPP2_PRS_SINGLE_VLAN_AI 0
551 #define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
554 #define MVPP2_PRS_TAGGED true
555 #define MVPP2_PRS_UNTAGGED false
556 #define MVPP2_PRS_EDSA true
557 #define MVPP2_PRS_DSA false
559 /* MAC entries, shadow udf */
561 MVPP2_PRS_UDF_MAC_DEF
,
562 MVPP2_PRS_UDF_MAC_RANGE
,
563 MVPP2_PRS_UDF_L2_DEF
,
564 MVPP2_PRS_UDF_L2_DEF_COPY
,
565 MVPP2_PRS_UDF_L2_USER
,
569 enum mvpp2_prs_lookup
{
583 enum mvpp2_prs_l3_cast
{
584 MVPP2_PRS_L3_UNI_CAST
,
585 MVPP2_PRS_L3_MULTI_CAST
,
586 MVPP2_PRS_L3_BROAD_CAST
589 /* Classifier constants */
590 #define MVPP2_CLS_FLOWS_TBL_SIZE 512
591 #define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
592 #define MVPP2_CLS_LKP_TBL_SIZE 64
595 #define MVPP2_BM_POOLS_NUM 8
596 #define MVPP2_BM_LONG_BUF_NUM 1024
597 #define MVPP2_BM_SHORT_BUF_NUM 2048
598 #define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
599 #define MVPP2_BM_POOL_PTR_ALIGN 128
600 #define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
601 #define MVPP2_BM_SWF_SHORT_POOL 3
603 /* BM cookie (32 bits) definition */
604 #define MVPP2_BM_COOKIE_POOL_OFFS 8
605 #define MVPP2_BM_COOKIE_CPU_OFFS 24
607 /* BM short pool packet size
608 * These value assure that for SWF the total number
609 * of bytes allocated for each buffer will be 512
611 #define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
621 /* Shared Packet Processor resources */
623 /* Shared registers' base addresses */
625 void __iomem
*lms_base
;
631 /* List of pointers to port structures */
632 struct mvpp2_port
**port_list
;
634 /* Aggregated TXQs */
635 struct mvpp2_tx_queue
*aggr_txqs
;
638 struct mvpp2_bm_pool
*bm_pools
;
640 /* PRS shadow table */
641 struct mvpp2_prs_shadow
*prs_shadow
;
642 /* PRS auxiliary table for double vlan entries control */
643 bool *prs_double_vlans
;
649 enum { MVPP21
, MVPP22
} hw_version
;
652 struct mvpp2_pcpu_stats
{
653 struct u64_stats_sync syncp
;
660 /* Per-CPU port control */
661 struct mvpp2_port_pcpu
{
662 struct hrtimer tx_done_timer
;
663 bool timer_scheduled
;
664 /* Tasklet for egress finalization */
665 struct tasklet_struct tx_done_tasklet
;
675 /* Per-port registers' base address */
678 struct mvpp2_rx_queue
**rxqs
;
679 struct mvpp2_tx_queue
**txqs
;
680 struct net_device
*dev
;
684 u32 pending_cause_rx
;
685 struct napi_struct napi
;
687 /* Per-CPU port control */
688 struct mvpp2_port_pcpu __percpu
*pcpu
;
695 struct mvpp2_pcpu_stats __percpu
*stats
;
697 phy_interface_t phy_interface
;
698 struct device_node
*phy_node
;
703 struct mvpp2_bm_pool
*pool_long
;
704 struct mvpp2_bm_pool
*pool_short
;
706 /* Index of first port's physical RXQ */
710 /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
711 * layout of the transmit and reception DMA descriptors, and their
712 * layout is therefore defined by the hardware design
715 #define MVPP2_TXD_L3_OFF_SHIFT 0
716 #define MVPP2_TXD_IP_HLEN_SHIFT 8
717 #define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
718 #define MVPP2_TXD_L4_CSUM_NOT BIT(14)
719 #define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
720 #define MVPP2_TXD_PADDING_DISABLE BIT(23)
721 #define MVPP2_TXD_L4_UDP BIT(24)
722 #define MVPP2_TXD_L3_IP6 BIT(26)
723 #define MVPP2_TXD_L_DESC BIT(28)
724 #define MVPP2_TXD_F_DESC BIT(29)
726 #define MVPP2_RXD_ERR_SUMMARY BIT(15)
727 #define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
728 #define MVPP2_RXD_ERR_CRC 0x0
729 #define MVPP2_RXD_ERR_OVERRUN BIT(13)
730 #define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
731 #define MVPP2_RXD_BM_POOL_ID_OFFS 16
732 #define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
733 #define MVPP2_RXD_HWF_SYNC BIT(21)
734 #define MVPP2_RXD_L4_CSUM_OK BIT(22)
735 #define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
736 #define MVPP2_RXD_L4_TCP BIT(25)
737 #define MVPP2_RXD_L4_UDP BIT(26)
738 #define MVPP2_RXD_L3_IP4 BIT(28)
739 #define MVPP2_RXD_L3_IP6 BIT(30)
740 #define MVPP2_RXD_BUF_HDR BIT(31)
742 /* HW TX descriptor for PPv2.1 */
743 struct mvpp21_tx_desc
{
744 u32 command
; /* Options used by HW for packet transmitting.*/
745 u8 packet_offset
; /* the offset from the buffer beginning */
746 u8 phys_txq
; /* destination queue ID */
747 u16 data_size
; /* data size of transmitted packet in bytes */
748 u32 buf_dma_addr
; /* physical addr of transmitted buffer */
749 u32 buf_cookie
; /* cookie for access to TX buffer in tx path */
750 u32 reserved1
[3]; /* hw_cmd (for future use, BM, PON, PNC) */
751 u32 reserved2
; /* reserved (for future use) */
754 /* HW RX descriptor for PPv2.1 */
755 struct mvpp21_rx_desc
{
756 u32 status
; /* info about received packet */
757 u16 reserved1
; /* parser_info (for future use, PnC) */
758 u16 data_size
; /* size of received packet in bytes */
759 u32 buf_dma_addr
; /* physical address of the buffer */
760 u32 buf_cookie
; /* cookie for access to RX buffer in rx path */
761 u16 reserved2
; /* gem_port_id (for future use, PON) */
762 u16 reserved3
; /* csum_l4 (for future use, PnC) */
763 u8 reserved4
; /* bm_qset (for future use, BM) */
765 u16 reserved6
; /* classify_info (for future use, PnC) */
766 u32 reserved7
; /* flow_id (for future use, PnC) */
770 /* Opaque type used by the driver to manipulate the HW TX and RX
773 struct mvpp2_tx_desc
{
775 struct mvpp21_tx_desc pp21
;
779 struct mvpp2_rx_desc
{
781 struct mvpp21_rx_desc pp21
;
785 struct mvpp2_txq_pcpu_buf
{
786 /* Transmitted SKB */
789 /* Physical address of transmitted buffer */
792 /* Size transmitted */
796 /* Per-CPU Tx queue control */
797 struct mvpp2_txq_pcpu
{
800 /* Number of Tx DMA descriptors in the descriptor ring */
803 /* Number of currently used Tx DMA descriptor in the
808 /* Number of Tx DMA descriptors reserved for each CPU */
811 /* Infos about transmitted buffers */
812 struct mvpp2_txq_pcpu_buf
*buffs
;
814 /* Index of last TX DMA descriptor that was inserted */
817 /* Index of the TX DMA descriptor to be cleaned up */
821 struct mvpp2_tx_queue
{
822 /* Physical number of this Tx queue */
825 /* Logical number of this Tx queue */
828 /* Number of Tx DMA descriptors in the descriptor ring */
831 /* Number of currently used Tx DMA descriptor in the descriptor ring */
834 /* Per-CPU control of physical Tx queues */
835 struct mvpp2_txq_pcpu __percpu
*pcpu
;
839 /* Virtual address of thex Tx DMA descriptors array */
840 struct mvpp2_tx_desc
*descs
;
842 /* DMA address of the Tx DMA descriptors array */
843 dma_addr_t descs_dma
;
845 /* Index of the last Tx DMA descriptor */
848 /* Index of the next Tx DMA descriptor to process */
849 int next_desc_to_proc
;
852 struct mvpp2_rx_queue
{
853 /* RX queue number, in the range 0-31 for physical RXQs */
856 /* Num of rx descriptors in the rx descriptor ring */
862 /* Virtual address of the RX DMA descriptors array */
863 struct mvpp2_rx_desc
*descs
;
865 /* DMA address of the RX DMA descriptors array */
866 dma_addr_t descs_dma
;
868 /* Index of the last RX DMA descriptor */
871 /* Index of the next RX DMA descriptor to process */
872 int next_desc_to_proc
;
874 /* ID of port to which physical RXQ is mapped */
877 /* Port's logic RXQ number to which physical RXQ is mapped */
881 union mvpp2_prs_tcam_entry
{
882 u32 word
[MVPP2_PRS_TCAM_WORDS
];
883 u8 byte
[MVPP2_PRS_TCAM_WORDS
* 4];
886 union mvpp2_prs_sram_entry
{
887 u32 word
[MVPP2_PRS_SRAM_WORDS
];
888 u8 byte
[MVPP2_PRS_SRAM_WORDS
* 4];
891 struct mvpp2_prs_entry
{
893 union mvpp2_prs_tcam_entry tcam
;
894 union mvpp2_prs_sram_entry sram
;
897 struct mvpp2_prs_shadow
{
904 /* User defined offset */
912 struct mvpp2_cls_flow_entry
{
914 u32 data
[MVPP2_CLS_FLOWS_TBL_DATA_WORDS
];
917 struct mvpp2_cls_lookup_entry
{
923 struct mvpp2_bm_pool
{
924 /* Pool number in the range 0-7 */
926 enum mvpp2_bm_type type
;
928 /* Buffer Pointers Pool External (BPPE) size */
930 /* Number of buffers for this pool */
932 /* Pool buffer size */
938 /* BPPE virtual base address */
940 /* BPPE DMA base address */
943 /* Ports using BM pool */
947 /* Static declaractions */
949 /* Number of RXQs used by single port */
950 static int rxq_number
= MVPP2_DEFAULT_RXQ
;
951 /* Number of TXQs used by single port */
952 static int txq_number
= MVPP2_MAX_TXQ
;
954 #define MVPP2_DRIVER_NAME "mvpp2"
955 #define MVPP2_DRIVER_VERSION "1.0"
957 /* Utility/helper methods */
959 static void mvpp2_write(struct mvpp2
*priv
, u32 offset
, u32 data
)
961 writel(data
, priv
->base
+ offset
);
964 static u32
mvpp2_read(struct mvpp2
*priv
, u32 offset
)
966 return readl(priv
->base
+ offset
);
969 static dma_addr_t
mvpp2_txdesc_dma_addr_get(struct mvpp2_port
*port
,
970 struct mvpp2_tx_desc
*tx_desc
)
972 return tx_desc
->pp21
.buf_dma_addr
;
975 static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port
*port
,
976 struct mvpp2_tx_desc
*tx_desc
,
979 tx_desc
->pp21
.buf_dma_addr
= dma_addr
;
982 static size_t mvpp2_txdesc_size_get(struct mvpp2_port
*port
,
983 struct mvpp2_tx_desc
*tx_desc
)
985 return tx_desc
->pp21
.data_size
;
988 static void mvpp2_txdesc_size_set(struct mvpp2_port
*port
,
989 struct mvpp2_tx_desc
*tx_desc
,
992 tx_desc
->pp21
.data_size
= size
;
995 static void mvpp2_txdesc_txq_set(struct mvpp2_port
*port
,
996 struct mvpp2_tx_desc
*tx_desc
,
999 tx_desc
->pp21
.phys_txq
= txq
;
1002 static void mvpp2_txdesc_cmd_set(struct mvpp2_port
*port
,
1003 struct mvpp2_tx_desc
*tx_desc
,
1004 unsigned int command
)
1006 tx_desc
->pp21
.command
= command
;
1009 static void mvpp2_txdesc_offset_set(struct mvpp2_port
*port
,
1010 struct mvpp2_tx_desc
*tx_desc
,
1011 unsigned int offset
)
1013 tx_desc
->pp21
.packet_offset
= offset
;
1016 static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port
*port
,
1017 struct mvpp2_tx_desc
*tx_desc
)
1019 return tx_desc
->pp21
.packet_offset
;
1022 static dma_addr_t
mvpp2_rxdesc_dma_addr_get(struct mvpp2_port
*port
,
1023 struct mvpp2_rx_desc
*rx_desc
)
1025 return rx_desc
->pp21
.buf_dma_addr
;
1028 static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port
*port
,
1029 struct mvpp2_rx_desc
*rx_desc
)
1031 return rx_desc
->pp21
.buf_cookie
;
1034 static size_t mvpp2_rxdesc_size_get(struct mvpp2_port
*port
,
1035 struct mvpp2_rx_desc
*rx_desc
)
1037 return rx_desc
->pp21
.data_size
;
1040 static u32
mvpp2_rxdesc_status_get(struct mvpp2_port
*port
,
1041 struct mvpp2_rx_desc
*rx_desc
)
1043 return rx_desc
->pp21
.status
;
1046 static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu
*txq_pcpu
)
1048 txq_pcpu
->txq_get_index
++;
1049 if (txq_pcpu
->txq_get_index
== txq_pcpu
->size
)
1050 txq_pcpu
->txq_get_index
= 0;
1053 static void mvpp2_txq_inc_put(struct mvpp2_port
*port
,
1054 struct mvpp2_txq_pcpu
*txq_pcpu
,
1055 struct sk_buff
*skb
,
1056 struct mvpp2_tx_desc
*tx_desc
)
1058 struct mvpp2_txq_pcpu_buf
*tx_buf
=
1059 txq_pcpu
->buffs
+ txq_pcpu
->txq_put_index
;
1061 tx_buf
->size
= mvpp2_txdesc_size_get(port
, tx_desc
);
1062 tx_buf
->dma
= mvpp2_txdesc_dma_addr_get(port
, tx_desc
) +
1063 mvpp2_txdesc_offset_get(port
, tx_desc
);
1064 txq_pcpu
->txq_put_index
++;
1065 if (txq_pcpu
->txq_put_index
== txq_pcpu
->size
)
1066 txq_pcpu
->txq_put_index
= 0;
1069 /* Get number of physical egress port */
1070 static inline int mvpp2_egress_port(struct mvpp2_port
*port
)
1072 return MVPP2_MAX_TCONT
+ port
->id
;
1075 /* Get number of physical TXQ */
1076 static inline int mvpp2_txq_phys(int port
, int txq
)
1078 return (MVPP2_MAX_TCONT
+ port
) * MVPP2_MAX_TXQ
+ txq
;
1081 /* Parser configuration routines */
1083 /* Update parser tcam and sram hw entries */
1084 static int mvpp2_prs_hw_write(struct mvpp2
*priv
, struct mvpp2_prs_entry
*pe
)
1088 if (pe
->index
> MVPP2_PRS_TCAM_SRAM_SIZE
- 1)
1091 /* Clear entry invalidation bit */
1092 pe
->tcam
.word
[MVPP2_PRS_TCAM_INV_WORD
] &= ~MVPP2_PRS_TCAM_INV_MASK
;
1094 /* Write tcam index - indirect access */
1095 mvpp2_write(priv
, MVPP2_PRS_TCAM_IDX_REG
, pe
->index
);
1096 for (i
= 0; i
< MVPP2_PRS_TCAM_WORDS
; i
++)
1097 mvpp2_write(priv
, MVPP2_PRS_TCAM_DATA_REG(i
), pe
->tcam
.word
[i
]);
1099 /* Write sram index - indirect access */
1100 mvpp2_write(priv
, MVPP2_PRS_SRAM_IDX_REG
, pe
->index
);
1101 for (i
= 0; i
< MVPP2_PRS_SRAM_WORDS
; i
++)
1102 mvpp2_write(priv
, MVPP2_PRS_SRAM_DATA_REG(i
), pe
->sram
.word
[i
]);
1107 /* Read tcam entry from hw */
1108 static int mvpp2_prs_hw_read(struct mvpp2
*priv
, struct mvpp2_prs_entry
*pe
)
1112 if (pe
->index
> MVPP2_PRS_TCAM_SRAM_SIZE
- 1)
1115 /* Write tcam index - indirect access */
1116 mvpp2_write(priv
, MVPP2_PRS_TCAM_IDX_REG
, pe
->index
);
1118 pe
->tcam
.word
[MVPP2_PRS_TCAM_INV_WORD
] = mvpp2_read(priv
,
1119 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD
));
1120 if (pe
->tcam
.word
[MVPP2_PRS_TCAM_INV_WORD
] & MVPP2_PRS_TCAM_INV_MASK
)
1121 return MVPP2_PRS_TCAM_ENTRY_INVALID
;
1123 for (i
= 0; i
< MVPP2_PRS_TCAM_WORDS
; i
++)
1124 pe
->tcam
.word
[i
] = mvpp2_read(priv
, MVPP2_PRS_TCAM_DATA_REG(i
));
1126 /* Write sram index - indirect access */
1127 mvpp2_write(priv
, MVPP2_PRS_SRAM_IDX_REG
, pe
->index
);
1128 for (i
= 0; i
< MVPP2_PRS_SRAM_WORDS
; i
++)
1129 pe
->sram
.word
[i
] = mvpp2_read(priv
, MVPP2_PRS_SRAM_DATA_REG(i
));
1134 /* Invalidate tcam hw entry */
1135 static void mvpp2_prs_hw_inv(struct mvpp2
*priv
, int index
)
1137 /* Write index - indirect access */
1138 mvpp2_write(priv
, MVPP2_PRS_TCAM_IDX_REG
, index
);
1139 mvpp2_write(priv
, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD
),
1140 MVPP2_PRS_TCAM_INV_MASK
);
1143 /* Enable shadow table entry and set its lookup ID */
1144 static void mvpp2_prs_shadow_set(struct mvpp2
*priv
, int index
, int lu
)
1146 priv
->prs_shadow
[index
].valid
= true;
1147 priv
->prs_shadow
[index
].lu
= lu
;
1150 /* Update ri fields in shadow table entry */
1151 static void mvpp2_prs_shadow_ri_set(struct mvpp2
*priv
, int index
,
1152 unsigned int ri
, unsigned int ri_mask
)
1154 priv
->prs_shadow
[index
].ri_mask
= ri_mask
;
1155 priv
->prs_shadow
[index
].ri
= ri
;
1158 /* Update lookup field in tcam sw entry */
1159 static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry
*pe
, unsigned int lu
)
1161 int enable_off
= MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE
);
1163 pe
->tcam
.byte
[MVPP2_PRS_TCAM_LU_BYTE
] = lu
;
1164 pe
->tcam
.byte
[enable_off
] = MVPP2_PRS_LU_MASK
;
1167 /* Update mask for single port in tcam sw entry */
1168 static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry
*pe
,
1169 unsigned int port
, bool add
)
1171 int enable_off
= MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE
);
1174 pe
->tcam
.byte
[enable_off
] &= ~(1 << port
);
1176 pe
->tcam
.byte
[enable_off
] |= 1 << port
;
1179 /* Update port map in tcam sw entry */
1180 static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry
*pe
,
1183 unsigned char port_mask
= MVPP2_PRS_PORT_MASK
;
1184 int enable_off
= MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE
);
1186 pe
->tcam
.byte
[MVPP2_PRS_TCAM_PORT_BYTE
] = 0;
1187 pe
->tcam
.byte
[enable_off
] &= ~port_mask
;
1188 pe
->tcam
.byte
[enable_off
] |= ~ports
& MVPP2_PRS_PORT_MASK
;
1191 /* Obtain port map from tcam sw entry */
1192 static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry
*pe
)
1194 int enable_off
= MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE
);
1196 return ~(pe
->tcam
.byte
[enable_off
]) & MVPP2_PRS_PORT_MASK
;
1199 /* Set byte of data and its enable bits in tcam sw entry */
1200 static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry
*pe
,
1201 unsigned int offs
, unsigned char byte
,
1202 unsigned char enable
)
1204 pe
->tcam
.byte
[MVPP2_PRS_TCAM_DATA_BYTE(offs
)] = byte
;
1205 pe
->tcam
.byte
[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs
)] = enable
;
1208 /* Get byte of data and its enable bits from tcam sw entry */
1209 static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry
*pe
,
1210 unsigned int offs
, unsigned char *byte
,
1211 unsigned char *enable
)
1213 *byte
= pe
->tcam
.byte
[MVPP2_PRS_TCAM_DATA_BYTE(offs
)];
1214 *enable
= pe
->tcam
.byte
[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs
)];
1217 /* Compare tcam data bytes with a pattern */
1218 static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry
*pe
, int offs
,
1221 int off
= MVPP2_PRS_TCAM_DATA_BYTE(offs
);
1224 tcam_data
= (8 << pe
->tcam
.byte
[off
+ 1]) | pe
->tcam
.byte
[off
];
1225 if (tcam_data
!= data
)
1230 /* Update ai bits in tcam sw entry */
1231 static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry
*pe
,
1232 unsigned int bits
, unsigned int enable
)
1234 int i
, ai_idx
= MVPP2_PRS_TCAM_AI_BYTE
;
1236 for (i
= 0; i
< MVPP2_PRS_AI_BITS
; i
++) {
1238 if (!(enable
& BIT(i
)))
1242 pe
->tcam
.byte
[ai_idx
] |= 1 << i
;
1244 pe
->tcam
.byte
[ai_idx
] &= ~(1 << i
);
1247 pe
->tcam
.byte
[MVPP2_PRS_TCAM_EN_OFFS(ai_idx
)] |= enable
;
1250 /* Get ai bits from tcam sw entry */
1251 static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry
*pe
)
1253 return pe
->tcam
.byte
[MVPP2_PRS_TCAM_AI_BYTE
];
1256 /* Set ethertype in tcam sw entry */
1257 static void mvpp2_prs_match_etype(struct mvpp2_prs_entry
*pe
, int offset
,
1258 unsigned short ethertype
)
1260 mvpp2_prs_tcam_data_byte_set(pe
, offset
+ 0, ethertype
>> 8, 0xff);
1261 mvpp2_prs_tcam_data_byte_set(pe
, offset
+ 1, ethertype
& 0xff, 0xff);
1264 /* Set bits in sram sw entry */
1265 static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry
*pe
, int bit_num
,
1268 pe
->sram
.byte
[MVPP2_BIT_TO_BYTE(bit_num
)] |= (val
<< (bit_num
% 8));
1271 /* Clear bits in sram sw entry */
1272 static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry
*pe
, int bit_num
,
1275 pe
->sram
.byte
[MVPP2_BIT_TO_BYTE(bit_num
)] &= ~(val
<< (bit_num
% 8));
1278 /* Update ri bits in sram sw entry */
1279 static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry
*pe
,
1280 unsigned int bits
, unsigned int mask
)
1284 for (i
= 0; i
< MVPP2_PRS_SRAM_RI_CTRL_BITS
; i
++) {
1285 int ri_off
= MVPP2_PRS_SRAM_RI_OFFS
;
1287 if (!(mask
& BIT(i
)))
1291 mvpp2_prs_sram_bits_set(pe
, ri_off
+ i
, 1);
1293 mvpp2_prs_sram_bits_clear(pe
, ri_off
+ i
, 1);
1295 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_RI_CTRL_OFFS
+ i
, 1);
1299 /* Obtain ri bits from sram sw entry */
1300 static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry
*pe
)
1302 return pe
->sram
.word
[MVPP2_PRS_SRAM_RI_WORD
];
1305 /* Update ai bits in sram sw entry */
1306 static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry
*pe
,
1307 unsigned int bits
, unsigned int mask
)
1310 int ai_off
= MVPP2_PRS_SRAM_AI_OFFS
;
1312 for (i
= 0; i
< MVPP2_PRS_SRAM_AI_CTRL_BITS
; i
++) {
1314 if (!(mask
& BIT(i
)))
1318 mvpp2_prs_sram_bits_set(pe
, ai_off
+ i
, 1);
1320 mvpp2_prs_sram_bits_clear(pe
, ai_off
+ i
, 1);
1322 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_AI_CTRL_OFFS
+ i
, 1);
1326 /* Read ai bits from sram sw entry */
1327 static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry
*pe
)
1330 int ai_off
= MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS
);
1331 int ai_en_off
= ai_off
+ 1;
1332 int ai_shift
= MVPP2_PRS_SRAM_AI_OFFS
% 8;
1334 bits
= (pe
->sram
.byte
[ai_off
] >> ai_shift
) |
1335 (pe
->sram
.byte
[ai_en_off
] << (8 - ai_shift
));
1340 /* In sram sw entry set lookup ID field of the tcam key to be used in the next
1343 static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry
*pe
,
1346 int sram_next_off
= MVPP2_PRS_SRAM_NEXT_LU_OFFS
;
1348 mvpp2_prs_sram_bits_clear(pe
, sram_next_off
,
1349 MVPP2_PRS_SRAM_NEXT_LU_MASK
);
1350 mvpp2_prs_sram_bits_set(pe
, sram_next_off
, lu
);
1353 /* In the sram sw entry set sign and value of the next lookup offset
1354 * and the offset value generated to the classifier
1356 static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry
*pe
, int shift
,
1361 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT
, 1);
1364 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT
, 1);
1368 pe
->sram
.byte
[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS
)] =
1369 (unsigned char)shift
;
1371 /* Reset and set operation */
1372 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS
,
1373 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK
);
1374 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS
, op
);
1376 /* Set base offset as current */
1377 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS
, 1);
1380 /* In the sram sw entry set sign and value of the user defined offset
1381 * generated to the classifier
1383 static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry
*pe
,
1384 unsigned int type
, int offset
,
1389 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_UDF_SIGN_BIT
, 1);
1390 offset
= 0 - offset
;
1392 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_UDF_SIGN_BIT
, 1);
1396 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_UDF_OFFS
,
1397 MVPP2_PRS_SRAM_UDF_MASK
);
1398 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_UDF_OFFS
, offset
);
1399 pe
->sram
.byte
[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS
+
1400 MVPP2_PRS_SRAM_UDF_BITS
)] &=
1401 ~(MVPP2_PRS_SRAM_UDF_MASK
>> (8 - (MVPP2_PRS_SRAM_UDF_OFFS
% 8)));
1402 pe
->sram
.byte
[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS
+
1403 MVPP2_PRS_SRAM_UDF_BITS
)] |=
1404 (offset
>> (8 - (MVPP2_PRS_SRAM_UDF_OFFS
% 8)));
1406 /* Set offset type */
1407 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_UDF_TYPE_OFFS
,
1408 MVPP2_PRS_SRAM_UDF_TYPE_MASK
);
1409 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_UDF_TYPE_OFFS
, type
);
1411 /* Set offset operation */
1412 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS
,
1413 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK
);
1414 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS
, op
);
1416 pe
->sram
.byte
[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS
+
1417 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS
)] &=
1418 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK
>>
1419 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS
% 8)));
1421 pe
->sram
.byte
[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS
+
1422 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS
)] |=
1423 (op
>> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS
% 8)));
1425 /* Set base offset as current */
1426 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS
, 1);
1429 /* Find parser flow entry */
1430 static struct mvpp2_prs_entry
*mvpp2_prs_flow_find(struct mvpp2
*priv
, int flow
)
1432 struct mvpp2_prs_entry
*pe
;
1435 pe
= kzalloc(sizeof(*pe
), GFP_KERNEL
);
1438 mvpp2_prs_tcam_lu_set(pe
, MVPP2_PRS_LU_FLOWS
);
1440 /* Go through the all entires with MVPP2_PRS_LU_FLOWS */
1441 for (tid
= MVPP2_PRS_TCAM_SRAM_SIZE
- 1; tid
>= 0; tid
--) {
1444 if (!priv
->prs_shadow
[tid
].valid
||
1445 priv
->prs_shadow
[tid
].lu
!= MVPP2_PRS_LU_FLOWS
)
1449 mvpp2_prs_hw_read(priv
, pe
);
1450 bits
= mvpp2_prs_sram_ai_get(pe
);
1452 /* Sram store classification lookup ID in AI bits [5:0] */
1453 if ((bits
& MVPP2_PRS_FLOW_ID_MASK
) == flow
)
1461 /* Return first free tcam index, seeking from start to end */
1462 static int mvpp2_prs_tcam_first_free(struct mvpp2
*priv
, unsigned char start
,
1470 if (end
>= MVPP2_PRS_TCAM_SRAM_SIZE
)
1471 end
= MVPP2_PRS_TCAM_SRAM_SIZE
- 1;
1473 for (tid
= start
; tid
<= end
; tid
++) {
1474 if (!priv
->prs_shadow
[tid
].valid
)
1481 /* Enable/disable dropping all mac da's */
1482 static void mvpp2_prs_mac_drop_all_set(struct mvpp2
*priv
, int port
, bool add
)
1484 struct mvpp2_prs_entry pe
;
1486 if (priv
->prs_shadow
[MVPP2_PE_DROP_ALL
].valid
) {
1487 /* Entry exist - update port only */
1488 pe
.index
= MVPP2_PE_DROP_ALL
;
1489 mvpp2_prs_hw_read(priv
, &pe
);
1491 /* Entry doesn't exist - create new */
1492 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1493 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_MAC
);
1494 pe
.index
= MVPP2_PE_DROP_ALL
;
1496 /* Non-promiscuous mode for all ports - DROP unknown packets */
1497 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_DROP_MASK
,
1498 MVPP2_PRS_RI_DROP_MASK
);
1500 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
1501 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1503 /* Update shadow table */
1504 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MAC
);
1506 /* Mask all ports */
1507 mvpp2_prs_tcam_port_map_set(&pe
, 0);
1510 /* Update port mask */
1511 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
1513 mvpp2_prs_hw_write(priv
, &pe
);
1516 /* Set port to promiscuous mode */
1517 static void mvpp2_prs_mac_promisc_set(struct mvpp2
*priv
, int port
, bool add
)
1519 struct mvpp2_prs_entry pe
;
1521 /* Promiscuous mode - Accept unknown packets */
1523 if (priv
->prs_shadow
[MVPP2_PE_MAC_PROMISCUOUS
].valid
) {
1524 /* Entry exist - update port only */
1525 pe
.index
= MVPP2_PE_MAC_PROMISCUOUS
;
1526 mvpp2_prs_hw_read(priv
, &pe
);
1528 /* Entry doesn't exist - create new */
1529 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1530 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_MAC
);
1531 pe
.index
= MVPP2_PE_MAC_PROMISCUOUS
;
1533 /* Continue - set next lookup */
1534 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
1536 /* Set result info bits */
1537 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L2_UCAST
,
1538 MVPP2_PRS_RI_L2_CAST_MASK
);
1540 /* Shift to ethertype */
1541 mvpp2_prs_sram_shift_set(&pe
, 2 * ETH_ALEN
,
1542 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1544 /* Mask all ports */
1545 mvpp2_prs_tcam_port_map_set(&pe
, 0);
1547 /* Update shadow table */
1548 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MAC
);
1551 /* Update port mask */
1552 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
1554 mvpp2_prs_hw_write(priv
, &pe
);
1557 /* Accept multicast */
1558 static void mvpp2_prs_mac_multi_set(struct mvpp2
*priv
, int port
, int index
,
1561 struct mvpp2_prs_entry pe
;
1562 unsigned char da_mc
;
1564 /* Ethernet multicast address first byte is
1565 * 0x01 for IPv4 and 0x33 for IPv6
1567 da_mc
= (index
== MVPP2_PE_MAC_MC_ALL
) ? 0x01 : 0x33;
1569 if (priv
->prs_shadow
[index
].valid
) {
1570 /* Entry exist - update port only */
1572 mvpp2_prs_hw_read(priv
, &pe
);
1574 /* Entry doesn't exist - create new */
1575 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1576 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_MAC
);
1579 /* Continue - set next lookup */
1580 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
1582 /* Set result info bits */
1583 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L2_MCAST
,
1584 MVPP2_PRS_RI_L2_CAST_MASK
);
1586 /* Update tcam entry data first byte */
1587 mvpp2_prs_tcam_data_byte_set(&pe
, 0, da_mc
, 0xff);
1589 /* Shift to ethertype */
1590 mvpp2_prs_sram_shift_set(&pe
, 2 * ETH_ALEN
,
1591 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1593 /* Mask all ports */
1594 mvpp2_prs_tcam_port_map_set(&pe
, 0);
1596 /* Update shadow table */
1597 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MAC
);
1600 /* Update port mask */
1601 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
1603 mvpp2_prs_hw_write(priv
, &pe
);
1606 /* Set entry for dsa packets */
1607 static void mvpp2_prs_dsa_tag_set(struct mvpp2
*priv
, int port
, bool add
,
1608 bool tagged
, bool extend
)
1610 struct mvpp2_prs_entry pe
;
1614 tid
= tagged
? MVPP2_PE_EDSA_TAGGED
: MVPP2_PE_EDSA_UNTAGGED
;
1617 tid
= tagged
? MVPP2_PE_DSA_TAGGED
: MVPP2_PE_DSA_UNTAGGED
;
1621 if (priv
->prs_shadow
[tid
].valid
) {
1622 /* Entry exist - update port only */
1624 mvpp2_prs_hw_read(priv
, &pe
);
1626 /* Entry doesn't exist - create new */
1627 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1628 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
1631 /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/
1632 mvpp2_prs_sram_shift_set(&pe
, shift
,
1633 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1635 /* Update shadow table */
1636 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_DSA
);
1639 /* Set tagged bit in DSA tag */
1640 mvpp2_prs_tcam_data_byte_set(&pe
, 0,
1641 MVPP2_PRS_TCAM_DSA_TAGGED_BIT
,
1642 MVPP2_PRS_TCAM_DSA_TAGGED_BIT
);
1643 /* Clear all ai bits for next iteration */
1644 mvpp2_prs_sram_ai_update(&pe
, 0,
1645 MVPP2_PRS_SRAM_AI_MASK
);
1646 /* If packet is tagged continue check vlans */
1647 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_VLAN
);
1649 /* Set result info bits to 'no vlans' */
1650 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_VLAN_NONE
,
1651 MVPP2_PRS_RI_VLAN_MASK
);
1652 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_L2
);
1655 /* Mask all ports */
1656 mvpp2_prs_tcam_port_map_set(&pe
, 0);
1659 /* Update port mask */
1660 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
1662 mvpp2_prs_hw_write(priv
, &pe
);
1665 /* Set entry for dsa ethertype */
1666 static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2
*priv
, int port
,
1667 bool add
, bool tagged
, bool extend
)
1669 struct mvpp2_prs_entry pe
;
1670 int tid
, shift
, port_mask
;
1673 tid
= tagged
? MVPP2_PE_ETYPE_EDSA_TAGGED
:
1674 MVPP2_PE_ETYPE_EDSA_UNTAGGED
;
1678 tid
= tagged
? MVPP2_PE_ETYPE_DSA_TAGGED
:
1679 MVPP2_PE_ETYPE_DSA_UNTAGGED
;
1680 port_mask
= MVPP2_PRS_PORT_MASK
;
1684 if (priv
->prs_shadow
[tid
].valid
) {
1685 /* Entry exist - update port only */
1687 mvpp2_prs_hw_read(priv
, &pe
);
1689 /* Entry doesn't exist - create new */
1690 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1691 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
1695 mvpp2_prs_match_etype(&pe
, 0, ETH_P_EDSA
);
1696 mvpp2_prs_match_etype(&pe
, 2, 0);
1698 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_DSA_MASK
,
1699 MVPP2_PRS_RI_DSA_MASK
);
1700 /* Shift ethertype + 2 byte reserved + tag*/
1701 mvpp2_prs_sram_shift_set(&pe
, 2 + MVPP2_ETH_TYPE_LEN
+ shift
,
1702 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1704 /* Update shadow table */
1705 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_DSA
);
1708 /* Set tagged bit in DSA tag */
1709 mvpp2_prs_tcam_data_byte_set(&pe
,
1710 MVPP2_ETH_TYPE_LEN
+ 2 + 3,
1711 MVPP2_PRS_TCAM_DSA_TAGGED_BIT
,
1712 MVPP2_PRS_TCAM_DSA_TAGGED_BIT
);
1713 /* Clear all ai bits for next iteration */
1714 mvpp2_prs_sram_ai_update(&pe
, 0,
1715 MVPP2_PRS_SRAM_AI_MASK
);
1716 /* If packet is tagged continue check vlans */
1717 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_VLAN
);
1719 /* Set result info bits to 'no vlans' */
1720 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_VLAN_NONE
,
1721 MVPP2_PRS_RI_VLAN_MASK
);
1722 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_L2
);
1724 /* Mask/unmask all ports, depending on dsa type */
1725 mvpp2_prs_tcam_port_map_set(&pe
, port_mask
);
1728 /* Update port mask */
1729 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
1731 mvpp2_prs_hw_write(priv
, &pe
);
1734 /* Search for existing single/triple vlan entry */
1735 static struct mvpp2_prs_entry
*mvpp2_prs_vlan_find(struct mvpp2
*priv
,
1736 unsigned short tpid
, int ai
)
1738 struct mvpp2_prs_entry
*pe
;
1741 pe
= kzalloc(sizeof(*pe
), GFP_KERNEL
);
1744 mvpp2_prs_tcam_lu_set(pe
, MVPP2_PRS_LU_VLAN
);
1746 /* Go through the all entries with MVPP2_PRS_LU_VLAN */
1747 for (tid
= MVPP2_PE_FIRST_FREE_TID
;
1748 tid
<= MVPP2_PE_LAST_FREE_TID
; tid
++) {
1749 unsigned int ri_bits
, ai_bits
;
1752 if (!priv
->prs_shadow
[tid
].valid
||
1753 priv
->prs_shadow
[tid
].lu
!= MVPP2_PRS_LU_VLAN
)
1758 mvpp2_prs_hw_read(priv
, pe
);
1759 match
= mvpp2_prs_tcam_data_cmp(pe
, 0, swab16(tpid
));
1764 ri_bits
= mvpp2_prs_sram_ri_get(pe
);
1765 ri_bits
&= MVPP2_PRS_RI_VLAN_MASK
;
1767 /* Get current ai value from tcam */
1768 ai_bits
= mvpp2_prs_tcam_ai_get(pe
);
1769 /* Clear double vlan bit */
1770 ai_bits
&= ~MVPP2_PRS_DBL_VLAN_AI_BIT
;
1775 if (ri_bits
== MVPP2_PRS_RI_VLAN_SINGLE
||
1776 ri_bits
== MVPP2_PRS_RI_VLAN_TRIPLE
)
1784 /* Add/update single/triple vlan entry */
1785 static int mvpp2_prs_vlan_add(struct mvpp2
*priv
, unsigned short tpid
, int ai
,
1786 unsigned int port_map
)
1788 struct mvpp2_prs_entry
*pe
;
1792 pe
= mvpp2_prs_vlan_find(priv
, tpid
, ai
);
1795 /* Create new tcam entry */
1796 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_LAST_FREE_TID
,
1797 MVPP2_PE_FIRST_FREE_TID
);
1801 pe
= kzalloc(sizeof(*pe
), GFP_KERNEL
);
1805 /* Get last double vlan tid */
1806 for (tid_aux
= MVPP2_PE_LAST_FREE_TID
;
1807 tid_aux
>= MVPP2_PE_FIRST_FREE_TID
; tid_aux
--) {
1808 unsigned int ri_bits
;
1810 if (!priv
->prs_shadow
[tid_aux
].valid
||
1811 priv
->prs_shadow
[tid_aux
].lu
!= MVPP2_PRS_LU_VLAN
)
1814 pe
->index
= tid_aux
;
1815 mvpp2_prs_hw_read(priv
, pe
);
1816 ri_bits
= mvpp2_prs_sram_ri_get(pe
);
1817 if ((ri_bits
& MVPP2_PRS_RI_VLAN_MASK
) ==
1818 MVPP2_PRS_RI_VLAN_DOUBLE
)
1822 if (tid
<= tid_aux
) {
1827 memset(pe
, 0 , sizeof(struct mvpp2_prs_entry
));
1828 mvpp2_prs_tcam_lu_set(pe
, MVPP2_PRS_LU_VLAN
);
1831 mvpp2_prs_match_etype(pe
, 0, tpid
);
1833 mvpp2_prs_sram_next_lu_set(pe
, MVPP2_PRS_LU_L2
);
1834 /* Shift 4 bytes - skip 1 vlan tag */
1835 mvpp2_prs_sram_shift_set(pe
, MVPP2_VLAN_TAG_LEN
,
1836 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1837 /* Clear all ai bits for next iteration */
1838 mvpp2_prs_sram_ai_update(pe
, 0, MVPP2_PRS_SRAM_AI_MASK
);
1840 if (ai
== MVPP2_PRS_SINGLE_VLAN_AI
) {
1841 mvpp2_prs_sram_ri_update(pe
, MVPP2_PRS_RI_VLAN_SINGLE
,
1842 MVPP2_PRS_RI_VLAN_MASK
);
1844 ai
|= MVPP2_PRS_DBL_VLAN_AI_BIT
;
1845 mvpp2_prs_sram_ri_update(pe
, MVPP2_PRS_RI_VLAN_TRIPLE
,
1846 MVPP2_PRS_RI_VLAN_MASK
);
1848 mvpp2_prs_tcam_ai_update(pe
, ai
, MVPP2_PRS_SRAM_AI_MASK
);
1850 mvpp2_prs_shadow_set(priv
, pe
->index
, MVPP2_PRS_LU_VLAN
);
1852 /* Update ports' mask */
1853 mvpp2_prs_tcam_port_map_set(pe
, port_map
);
1855 mvpp2_prs_hw_write(priv
, pe
);
1863 /* Get first free double vlan ai number */
1864 static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2
*priv
)
1868 for (i
= 1; i
< MVPP2_PRS_DBL_VLANS_MAX
; i
++) {
1869 if (!priv
->prs_double_vlans
[i
])
1876 /* Search for existing double vlan entry */
1877 static struct mvpp2_prs_entry
*mvpp2_prs_double_vlan_find(struct mvpp2
*priv
,
1878 unsigned short tpid1
,
1879 unsigned short tpid2
)
1881 struct mvpp2_prs_entry
*pe
;
1884 pe
= kzalloc(sizeof(*pe
), GFP_KERNEL
);
1887 mvpp2_prs_tcam_lu_set(pe
, MVPP2_PRS_LU_VLAN
);
1889 /* Go through the all entries with MVPP2_PRS_LU_VLAN */
1890 for (tid
= MVPP2_PE_FIRST_FREE_TID
;
1891 tid
<= MVPP2_PE_LAST_FREE_TID
; tid
++) {
1892 unsigned int ri_mask
;
1895 if (!priv
->prs_shadow
[tid
].valid
||
1896 priv
->prs_shadow
[tid
].lu
!= MVPP2_PRS_LU_VLAN
)
1900 mvpp2_prs_hw_read(priv
, pe
);
1902 match
= mvpp2_prs_tcam_data_cmp(pe
, 0, swab16(tpid1
))
1903 && mvpp2_prs_tcam_data_cmp(pe
, 4, swab16(tpid2
));
1908 ri_mask
= mvpp2_prs_sram_ri_get(pe
) & MVPP2_PRS_RI_VLAN_MASK
;
1909 if (ri_mask
== MVPP2_PRS_RI_VLAN_DOUBLE
)
/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 8 bytes - skip 2 vlan tags */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);
	return ret;
}
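
/* mvpp2_prs_double_vlan_add() is the mirror image of mvpp2_prs_vlan_add():
 * it allocates from the first free TID upwards and refuses a TID at or
 * after the first single/triple vlan entry, preserving the same "double
 * entries first" ordering from the other direction. The ai number reserved
 * in priv->prs_double_vlans[] ties the double-vlan hit to the follow-up
 * vlan lookup iteration.
 */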
/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
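
/* The second (non-fragmented) entry deliberately reuses 'pe' from the
 * fragmented case: the protocol match in TCAM byte 5 and the L4 offset are
 * kept, only the result info is rebuilt, and bytes 2-3 (which at this
 * lookup offset cover the IPv4 flags/fragment-offset field) are
 * additionally required to be zero.
 */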
/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
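
/* Each of the three per-port settings above shares its register with other
 * ports, which is why every update is a read-modify-write: the port's own
 * field is cleared with the port-specific mask before the new value is
 * OR'd in.
 */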
/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}
/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
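
/* This is the entry every packet hits first: each port's lookup starts at
 * MVPP2_PRS_LU_MH (see the mvpp2_prs_hw_port_init() calls in
 * mvpp2_prs_default_init()), the Marvell header is skipped via the shift,
 * and parsing continues at the destination MAC.
 */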
/* Set default entries (place holder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* place holders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
}
/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
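
/* All the place-holder entries above are written with an empty port map, so
 * they match nothing until mvpp2_prs_tag_mode_set() later adds ports to the
 * DSA/EDSA variants that are actually in use.
 */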
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
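
/* The "IPv4 with options" entry above shows the reuse idiom used throughout
 * this function: instead of rebuilding 'pe', only the TCAM byte holding the
 * version/IHL nibbles has its data and byte-enable cleared, so the entry
 * matches any header length, and the result-info words are zeroed before
 * the L3_IP4_OPT code is set.
 */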
/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88a8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88a8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
					      MVPP2_PRS_DBL_VLANS_MAX,
					      GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x8100, 0x88A8 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}
/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
						       MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (is_multicast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}
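
/* New per-address entries are allocated below the first MAC range entry
 * found above, keeping exact <DA, port> matches at higher TCAM priority
 * than any range matches.
 */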
static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}
/* Delete all port's multicast simple (not range) entries */
static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
{
	struct mvpp2_prs_entry pe;
	int index, tid;

	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		/* Only simple mac entries */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
			/* Delete this entry */
			mvpp2_prs_mac_da_accept(priv, port, da, false);
	}
}
static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}
/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry - create one */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}
/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}
static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}
/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
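
/* Note: buffers at most one page large come from the page-frag allocator
 * and must be released with skb_free_frag(); anything larger falls back to
 * kmalloc()/kfree(). The two helpers above must therefore always be used as
 * a pair on the same pool.
 */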
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	int size_bytes;
	u32 val;

	size_bytes = sizeof(u32) * size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->dma_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    bm_pool->dma_addr);
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}
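
/* The coherent region handed to the hardware is an array of 'size' 32-bit
 * buffer pointer slots; the IS_ALIGNED() check above presumably guards the
 * base-address alignment the BM engine requires for that array.
 */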
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		buf_dma_addr = mvpp2_read(priv,
					  MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
		buf_phys_addr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}
static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}
static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
	val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
		MVPP2_RXQ_POOL_LONG_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
	val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
		MVPP2_RXQ_POOL_SHORT_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}
/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
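
/* Worked example: starting from cookie = 0, mvpp2_bm_cookie_pool_set(0, 3)
 * returns (3 << MVPP2_BM_COOKIE_POOL_OFFS), and feeding that value to
 * mvpp2_bm_cookie_pool_get() yields 3 again - the pool id simply occupies
 * an 8-bit field inside the cookie.
 */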
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}
/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      dma_addr_t dma_addr,
			      phys_addr_t phys_addr)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "%s pool %d: %d of %d buffers added\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, i, buf_num);

	return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
		       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					 MVPP2_BM_SWF_LONG,
					 port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
					  MVPP2_BM_SWF_SHORT,
					  MVPP2_BM_SHORT_PKT_SIZE);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_bm_pool *port_pool = port->pool_long;
	int num, pkts_num = port_pool->buf_num;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* Update BM pool with new buffer size */
	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
	if (port_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
		return -EIO;
	}

	port_pool->pkt_size = pkt_size;
	port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
		MVPP2_SKB_SHINFO_SIZE;
	num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
	if (num != pkts_num) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     port_pool->id, num, pkts_num);
		return -EIO;
	}

	mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
	dev->mtu = mtu;
	netdev_update_features(dev);
	return 0;
}
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}
/* Mask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
		    (MVPP2_CAUSE_MISC_SUM_MASK |
		     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
3918 /* Port configuration routines */
3920 static void mvpp2_port_mii_set(struct mvpp2_port
*port
)
3924 val
= readl(port
->base
+ MVPP2_GMAC_CTRL_2_REG
);
3926 switch (port
->phy_interface
) {
3927 case PHY_INTERFACE_MODE_SGMII
:
3928 val
|= MVPP2_GMAC_INBAND_AN_MASK
;
3930 case PHY_INTERFACE_MODE_RGMII
:
3931 val
|= MVPP2_GMAC_PORT_RGMII_MASK
;
3933 val
&= ~MVPP2_GMAC_PCS_ENABLE_MASK
;
3936 writel(val
, port
->base
+ MVPP2_GMAC_CTRL_2_REG
);
3939 static void mvpp2_port_fc_adv_enable(struct mvpp2_port
*port
)
3943 val
= readl(port
->base
+ MVPP2_GMAC_AUTONEG_CONFIG
);
3944 val
|= MVPP2_GMAC_FC_ADV_EN
;
3945 writel(val
, port
->base
+ MVPP2_GMAC_AUTONEG_CONFIG
);
3948 static void mvpp2_port_enable(struct mvpp2_port
*port
)
3952 val
= readl(port
->base
+ MVPP2_GMAC_CTRL_0_REG
);
3953 val
|= MVPP2_GMAC_PORT_EN_MASK
;
3954 val
|= MVPP2_GMAC_MIB_CNTR_EN_MASK
;
3955 writel(val
, port
->base
+ MVPP2_GMAC_CTRL_0_REG
);
3958 static void mvpp2_port_disable(struct mvpp2_port
*port
)
3962 val
= readl(port
->base
+ MVPP2_GMAC_CTRL_0_REG
);
3963 val
&= ~(MVPP2_GMAC_PORT_EN_MASK
);
3964 writel(val
, port
->base
+ MVPP2_GMAC_CTRL_0_REG
);
3967 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
3968 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port
*port
)
3972 val
= readl(port
->base
+ MVPP2_GMAC_CTRL_1_REG
) &
3973 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK
;
3974 writel(val
, port
->base
+ MVPP2_GMAC_CTRL_1_REG
);
/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}
/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
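/* Sizing example (illustrative): the register field holds the maximum
 * Rx size in units of two bytes. Assuming the 2-byte Marvell header
 * (MVPP2_MH_SIZE), a pkt_size of 1518 is programmed as
 * (1518 - 2) / 2 = 758.
 */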
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	/* Configure port to loopback if needed */
	if (port->flags & MVPP2_F_LOOPBACK)
		mvpp2_port_loopback_set(port);

	/* Update TX FIFO MIN Threshold */
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	/* Min. TX threshold must be less than minimal packet length */
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
			   MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}
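/* Illustrative example of the status-update encoding: used_count lands
 * in the low bits and free_count is shifted up by
 * MVPP2_RXQ_NUM_NEW_OFFSET, so calling this with (rxq_id, 4, 4) returns
 * four processed descriptor slots to the hardware in a single write.
 */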
/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		    MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
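/* Illustrative example: packet offsets are programmed in 32-byte units,
 * so a typical NET_SKB_PAD of 64 bytes (the usual value, though it is
 * architecture dependent) is written as 64 >> 5 = 2 units.
 */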
/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
				 struct mvpp2_rx_desc *rx_desc)
{
	int pool;
	int cpu = smp_processor_id();

	pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
		MVPP2_RXD_BM_POOL_ID_MASK) >>
		MVPP2_RXD_BM_POOL_ID_OFFS;

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}
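/* Illustrative cookie layout: the BM pool id and the CPU that built the
 * cookie each occupy one byte of the returned word, e.g. pool 2 seen on
 * CPU 1 yields (2 << MVPP2_BM_COOKIE_POOL_OFFS) |
 * (1 << MVPP2_BM_COOKIE_CPU_OFFS). mvpp2_bm_cookie_pool_get() later
 * extracts the pool field when a buffer is returned on refill.
 */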
/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > aggr_txq->size) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
	}

	if ((aggr_txq->count + num) > aggr_txq->size)
		return -ENOMEM;

	return 0;
}
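/* Note: the aggregated TXQ is a per-CPU queue shared by every port
 * transmitting on that CPU, so the occupancy cached in aggr_txq->count
 * can be stale; re-reading MVPP2_AGGR_TXQ_STATUS_REG above refreshes it
 * before the final check.
 */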
/* Reserved Tx descriptors allocation request */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}

/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	   (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor count has been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;

	return 0;
}

/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == swab16(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}
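/* Illustrative call: for a TCP segment in an untagged IPv4 frame, the
 * caller passes l3_offs = 14 (the Ethernet header), the IHL value from
 * the IP header (5 for a 20-byte header, counted in 32-bit words) and
 * IPPROTO_TCP, which clears both "csum disable" bits so the hardware
 * generates the IPv4 and TCP checksums.
 */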
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG,
		    rxq->pkts_coal);
}

static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
	u64 tmp = (u64)clk_hz * usec;

	do_div(tmp, USEC_PER_SEC);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
{
	u64 tmp = (u64)cycles * USEC_PER_SEC;

	do_div(tmp, clk_hz);

	return tmp > U32_MAX ? U32_MAX : tmp;
}
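/* Worked example of the conversion above (the real rate comes from the
 * clock tree): with a 250 MHz tclk, a 100 usec coalescing delay maps to
 * 250000000 * 100 / 1000000 = 25000 clock cycles, well under the
 * U32_MAX clamp.
 */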
/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);

	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
		rxq->time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
				 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
}

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = this_cpu_ptr(txq->pcpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}
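/* Illustrative walk of the loop above: for cause = 0b101, fls() - 1
 * first selects Tx queue 2; its bit is then cleared and the next
 * iteration handles queue 0, so queues are drained from highest to
 * lowest log_id.
 */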
/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
				desc_num * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address */
	/* indirect access */
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
		    aggr_txq->descs_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_dma);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);

		mvpp2_pool_refill(port, bm,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs, rxq->descs_dma);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma         = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
				txq->size * MVPP2_DESC_ALIGNED_SIZE,
				&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
					     MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc(txq_pcpu->size *
					  sizeof(struct mvpp2_txq_pcpu_buf),
					  GFP_KERNEL);
		if (!txq_pcpu->buffs)
			goto error;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}

	return 0;

error:
	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);
	}

	dma_free_coherent(port->dev->dev.parent,
			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
			  txq->descs, txq->descs_dma);

	return -ENOMEM;
}

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma         = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG) &
			MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}
/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;

	mvpp2_interrupts_disable(port);

	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}

static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			u32 val;

			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed  = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
		phy_print_status(phydev);
	}
}

static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << txq_number) - 1;
	tx_todo = mvpp2_tx_done(port, cause);

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}

/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool, u32 bm)
{
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
			      GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	mvpp2_pool_refill(port, bm, dma_addr, phys_addr);

	return 0;
}

/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
		    struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 bm, rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		bm = mvpp2_bm_cookie_build(port, rx_desc);
		pool = mvpp2_bm_cookie_pool_get(bm);
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * comprised by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
			continue;
		}

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, bm);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_done;
}
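/* Note on the build_skb() call above: a frag_size of 0 tells the
 * network core that the buffer was kmalloc'ed rather than a page
 * fragment, which is why 0 is used when the pool's frag_size exceeds
 * PAGE_SIZE.
 */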
static void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);
	dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
			 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}

/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, frag->size);

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto error;
		}

		mvpp2_txdesc_offset_set(port, tx_desc,
					buf_dma_addr & MVPP2_TX_DESC_ALIGN);
		mvpp2_txdesc_dma_addr_set(port, tx_desc,
					  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}

/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_offset_set(port, tx_desc,
				buf_dma_addr & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
			goto out;
		}
	}

	txq_pcpu->reserved_num -= frags;
	txq_pcpu->count += frags;
	aggr_txq->count += frags;

	/* Enable transmit */
	wmb();
	mvpp2_aggr_txq_pend_desc_add(port, frags);

	if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netif_tx_stop_queue(nq);
	}
out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}
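/* Note: the wmb() before mvpp2_aggr_txq_pend_desc_add() above makes
 * sure every descriptor write has reached memory before the pending
 * count is handed to the hardware, which may start DMA immediately.
 */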
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}

static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_read(port->priv,
				 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
	cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;

	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
			    cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	/* Process RX packets */
	cause_rx |= port->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete_done(napi, rx_done);

		mvpp2_interrupts_enable(port);
	}
	port->pending_cause_rx = cause_rx;
	return rx_done;
}
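/* Note on budget handling above: when the budget runs out before every
 * Rx queue is drained, the leftover cause bits are parked in
 * port->pending_cause_rx and interrupts stay disabled, so the next NAPI
 * poll resumes exactly where this one stopped.
 */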
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	mvpp2_gmac_max_rx_size_set(port);
	mvpp2_txp_max_tx_size_set(port);

	napi_enable(&port->napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	mvpp2_port_enable(port);
	phy_start(ndev->phydev);
	netif_tx_start_all_queues(port->dev);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mdelay(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	napi_disable(&port->napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	phy_stop(ndev->phydev);
}

static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD)
		new_rx_pending = MVPP2_MAX_RXD;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD)
		new_tx_pending = MVPP2_MAX_TXD;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
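/* Illustrative example: a requested rx_pending of 100 descriptors is
 * not a multiple of 16, so it is rounded up to ALIGN(100, 16) = 112 and
 * reported with the "illegal Rx ring size" message above.
 */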
static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
static int mvpp2_phy_connect(struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
				 port->phy_interface);
	if (!phy_dev) {
		netdev_err(port->dev, "cannot connect to phy\n");
		return -ENODEV;
	}
	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	port->link    = 0;
	port->duplex  = 0;
	port->speed   = 0;

	return 0;
}

static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	phy_disconnect(ndev->phydev);
}
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
	if (err) {
		netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
		goto err_cleanup_txqs;
	}

	/* In default link is down */
	netif_carrier_off(port->dev);

	err = mvpp2_phy_connect(port);
	if (err < 0)
		goto err_free_irq;

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);

	mvpp2_start_dev(port);

	return 0;

err_free_irq:
	free_irq(port->irq, port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	int cpu;

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);

	free_irq(port->irq, port);
	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_cancel(&port_pcpu->tx_done_timer);
		port_pcpu->timer_scheduled = false;
		tasklet_kill(&port_pcpu->tx_done_tasklet);
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	return 0;
}

static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	struct netdev_hw_addr *ha;
	int id = port->id;
	bool allmulti = dev->flags & IFF_ALLMULTI;

	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

	/* Remove all port->id's mcast entries */
	mvpp2_prs_mcast_del_all(priv, id);

	if (allmulti && !netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(ha, dev)
			mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
	}
}
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser to accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto error;
out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;

error:
	netdev_err(dev, "failed to change MAC address\n");
	return err;
}

static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

error:
	netdev_err(dev, "failed to change MTU\n");

	return err;
}
static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
}

static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret;

	if (!dev->phydev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}

/* Ethtool methods */

/* Set interrupt coalescing for ethtool */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
	}

	return 0;
}

/* Get interrupt coalescing for ethtool */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	return 0;
}
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD;
	ring->tx_max_pending = MVPP2_MAX_TXD;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}

static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters");
	return err;
}
static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset	= phy_ethtool_nway_reset,
	.get_link	= ethtool_op_get_link,
	.set_coalesce	= mvpp2_ethtool_set_coalesce,
	.get_coalesce	= mvpp2_ethtool_get_coalesce,
	.get_drvinfo	= mvpp2_ethtool_get_drvinfo,
	.get_ringparam	= mvpp2_ethtool_get_ringparam,
	.set_ringparam	= mvpp2_ethtool_set_ringparam,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Configure Rx queue group interrupt for this port */
	mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < txq_number; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct device_node *port_node,
			    struct mvpp2 *priv,
			    int *next_first_rxq)
{
	struct device_node *phy_node;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct net_device *dev;
	struct resource *res;
	const char *dt_mac_addr;
	const char *mac_from;
	char hw_mac_addr[ETH_ALEN];
	u32 id;
	int features;
	int phy_mode;
	int priv_common_regs_num = 2;
	int err, i, cpu;

	dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
				 rxq_number);
	if (!dev)
		return -ENOMEM;

	phy_node = of_parse_phandle(port_node, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "missing phy\n");
		err = -ENODEV;
		goto err_free_netdev;
	}

	phy_mode = of_get_phy_mode(port_node);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (of_property_read_u32(port_node, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);

	port->irq = irq_of_parse_and_map(port_node, 0);
	if (port->irq <= 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	if (of_property_read_bool(port_node, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->priv = priv;
	port->id = id;
	port->first_rxq = *next_first_rxq;
	port->phy_node = phy_node;
	port->phy_interface = phy_mode;

	res = platform_get_resource(pdev, IORESOURCE_MEM,
				    priv_common_regs_num + id);
	port->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(port->base)) {
		err = PTR_ERR(port->base);
		goto err_free_irq;
	}

	/* Alloc per-cpu stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	dt_mac_addr = of_get_mac_address(port_node);
	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
		mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
	} else {
		mvpp2_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;
	port->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}
	mvpp2_port_power_up(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_PINNED);
		port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
		port_pcpu->timer_scheduled = false;

		tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
			     (unsigned long)dev);
	}

	netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
	features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->vlan_features |= features;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	/* Increment the first Rx queue number to be used by the next port */
	*next_first_rxq += rxq_number;
	priv->port_list[id] = port;
	return 0;

err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	irq_dispose_mapping(port->irq);
err_free_netdev:
	of_node_put(phy_node);
	free_netdev(dev);
	return err;
}
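/* For illustration only (the authoritative binding lives in the DT
 * documentation, not here): a port node consumed by mvpp2_port_probe()
 * above typically looks like
 *
 *	eth0: eth0@c4000 {
 *		port-id = <0>;
 *		phy = <&phy0>;
 *		phy-mode = "rgmii-id";
 *	};
 *
 * where the node name, label, and phy-mode value are examples, and
 * "marvell,loopback" and a MAC-address property (e.g.
 * "local-mac-address") are optional extras parsed above.
 */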
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	of_node_put(port->phy_node);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
	irq_dispose_mapping(port->irq);
	free_netdev(port->dev);
}
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
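/* Worked example (illustrative, not from the original source): for a
 * 256 MB DRAM chip-select window, cs->size = 0x10000000, so
 * (cs->size - 1) & 0xffff0000 = 0x0fff0000 -- i.e. the SIZE register
 * holds the window length minus one, truncated to 64 KB granularity.
 */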
/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints */
	if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	/* Disable HW PHY polling */
	val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
	writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx FIFO init */
	mvpp2_rx_fifo_init(priv);

	/* Reset Rx queue group interrupt configuration */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);

	writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
	       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	int port_count, first_rxq;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->hw_version =
		(unsigned long)of_device_get_match_data(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->lms_base))
		return PTR_ERR(priv->lms_base);

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_gop_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_gop_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(struct mvpp2_port *),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_gop_clk;
	}

	/* Initialize ports */
	first_rxq = 0;
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
		if (err < 0)
			goto err_gop_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");