1 /*
2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Marcin Wojtas <mw@semihalf.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13 #include <linux/kernel.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/platform_device.h>
17 #include <linux/skbuff.h>
18 #include <linux/inetdevice.h>
19 #include <linux/mbus.h>
20 #include <linux/module.h>
21 #include <linux/interrupt.h>
22 #include <linux/cpumask.h>
23 #include <linux/of.h>
24 #include <linux/of_irq.h>
25 #include <linux/of_mdio.h>
26 #include <linux/of_net.h>
27 #include <linux/of_address.h>
28 #include <linux/of_device.h>
29 #include <linux/phy.h>
30 #include <linux/clk.h>
31 #include <linux/hrtimer.h>
32 #include <linux/ktime.h>
33 #include <uapi/linux/ppp_defs.h>
34 #include <net/ip.h>
35 #include <net/ipv6.h>
36
37 /* RX Fifo Registers */
38 #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
39 #define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
40 #define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
41 #define MVPP2_RX_FIFO_INIT_REG 0x64
42
43 /* RX DMA Top Registers */
44 #define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
45 #define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
46 #define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
47 #define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
48 #define MVPP2_POOL_BUF_SIZE_OFFSET 5
49 #define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
50 #define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
51 #define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
52 #define MVPP2_RXQ_POOL_SHORT_OFFS 20
53 #define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
54 #define MVPP2_RXQ_POOL_LONG_OFFS 24
55 #define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
56 #define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
57 #define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
58 #define MVPP2_RXQ_DISABLE_MASK BIT(31)
59
60 /* Parser Registers */
61 #define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
62 #define MVPP2_PRS_PORT_LU_MAX 0xf
63 #define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
64 #define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
65 #define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
66 #define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
67 #define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
68 #define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
69 #define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
70 #define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
71 #define MVPP2_PRS_TCAM_IDX_REG 0x1100
72 #define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
73 #define MVPP2_PRS_TCAM_INV_MASK BIT(31)
74 #define MVPP2_PRS_SRAM_IDX_REG 0x1200
75 #define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
76 #define MVPP2_PRS_TCAM_CTRL_REG 0x1230
77 #define MVPP2_PRS_TCAM_EN_MASK BIT(0)
78
79 /* Classifier Registers */
80 #define MVPP2_CLS_MODE_REG 0x1800
81 #define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
82 #define MVPP2_CLS_PORT_WAY_REG 0x1810
83 #define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
84 #define MVPP2_CLS_LKP_INDEX_REG 0x1814
85 #define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
86 #define MVPP2_CLS_LKP_TBL_REG 0x1818
87 #define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
88 #define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
89 #define MVPP2_CLS_FLOW_INDEX_REG 0x1820
90 #define MVPP2_CLS_FLOW_TBL0_REG 0x1824
91 #define MVPP2_CLS_FLOW_TBL1_REG 0x1828
92 #define MVPP2_CLS_FLOW_TBL2_REG 0x182c
93 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
94 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
95 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
96 #define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
97 #define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
98 #define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
99
100 /* Descriptor Manager Top Registers */
101 #define MVPP2_RXQ_NUM_REG 0x2040
102 #define MVPP2_RXQ_DESC_ADDR_REG 0x2044
103 #define MVPP2_RXQ_DESC_SIZE_REG 0x2048
104 #define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
105 #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
106 #define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
107 #define MVPP2_RXQ_NUM_NEW_OFFSET 16
108 #define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
109 #define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
110 #define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
111 #define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
112 #define MVPP2_RXQ_THRESH_REG 0x204c
113 #define MVPP2_OCCUPIED_THRESH_OFFSET 0
114 #define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
115 #define MVPP2_RXQ_INDEX_REG 0x2050
116 #define MVPP2_TXQ_NUM_REG 0x2080
117 #define MVPP2_TXQ_DESC_ADDR_REG 0x2084
118 #define MVPP2_TXQ_DESC_SIZE_REG 0x2088
119 #define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
120 #define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
121 #define MVPP2_TXQ_INDEX_REG 0x2098
122 #define MVPP2_TXQ_PREF_BUF_REG 0x209c
123 #define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
124 #define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
125 #define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
126 #define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
127 #define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
128 #define MVPP2_TXQ_PENDING_REG 0x20a0
129 #define MVPP2_TXQ_PENDING_MASK 0x3fff
130 #define MVPP2_TXQ_INT_STATUS_REG 0x20a4
131 #define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
132 #define MVPP2_TRANSMITTED_COUNT_OFFSET 16
133 #define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
134 #define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
135 #define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
136 #define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
137 #define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
138 #define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
139 #define MVPP2_TXQ_RSVD_CLR_OFFSET 16
140 #define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
141 #define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
142 #define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
143 #define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
144 #define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
145 #define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
146
147 /* MBUS bridge registers */
148 #define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
149 #define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
150 #define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
151 #define MVPP2_BASE_ADDR_ENABLE 0x4060
152
153 /* Interrupt Cause and Mask registers */
154 #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
155 #define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
156 #define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
157 #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
158 #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
159 #define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
160 #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
161 #define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
162 #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
163 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
164 #define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
165 #define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
166 #define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
167 #define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
168 #define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
169 #define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
170 #define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
171 #define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
172 #define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
173 #define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
174 #define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
175
176 /* Buffer Manager registers */
177 #define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
178 #define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
179 #define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
180 #define MVPP2_BM_POOL_SIZE_MASK 0xfff0
181 #define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
182 #define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
183 #define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
184 #define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
185 #define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
186 #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
187 #define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
188 #define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
189 #define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
190 #define MVPP2_BM_START_MASK BIT(0)
191 #define MVPP2_BM_STOP_MASK BIT(1)
192 #define MVPP2_BM_STATE_MASK BIT(4)
193 #define MVPP2_BM_LOW_THRESH_OFFS 8
194 #define MVPP2_BM_LOW_THRESH_MASK 0x7f00
195 #define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
196 MVPP2_BM_LOW_THRESH_OFFS)
197 #define MVPP2_BM_HIGH_THRESH_OFFS 16
198 #define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
199 #define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
200 MVPP2_BM_HIGH_THRESH_OFFS)
201 #define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
202 #define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
203 #define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
204 #define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
205 #define MVPP2_BM_BPPE_FULL_MASK BIT(3)
206 #define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
207 #define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
208 #define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
209 #define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
210 #define MVPP2_BM_VIRT_ALLOC_REG 0x6440
211 #define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
212 #define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
213 #define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
214 #define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
215 #define MVPP2_BM_VIRT_RLS_REG 0x64c0
216
217 /* TX Scheduler registers */
218 #define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
219 #define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
220 #define MVPP2_TXP_SCHED_ENQ_MASK 0xff
221 #define MVPP2_TXP_SCHED_DISQ_OFFSET 8
222 #define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
223 #define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
224 #define MVPP2_TXP_SCHED_MTU_REG 0x801c
225 #define MVPP2_TXP_MTU_MAX 0x7FFFF
226 #define MVPP2_TXP_SCHED_REFILL_REG 0x8020
227 #define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
228 #define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
229 #define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
230 #define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
231 #define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
232 #define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
233 #define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
234 #define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
235 #define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
236 #define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
237 #define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
238 #define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
239 #define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
240
241 /* TX general registers */
242 #define MVPP2_TX_SNOOP_REG 0x8800
243 #define MVPP2_TX_PORT_FLUSH_REG 0x8810
244 #define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
245
246 /* LMS registers */
247 #define MVPP2_SRC_ADDR_MIDDLE 0x24
248 #define MVPP2_SRC_ADDR_HIGH 0x28
249 #define MVPP2_PHY_AN_CFG0_REG 0x34
250 #define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
251 #define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
252 #define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
253
254 /* Per-port registers */
255 #define MVPP2_GMAC_CTRL_0_REG 0x0
256 #define MVPP2_GMAC_PORT_EN_MASK BIT(0)
257 #define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
258 #define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
259 #define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
260 #define MVPP2_GMAC_CTRL_1_REG 0x4
261 #define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
262 #define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
263 #define MVPP2_GMAC_PCS_LB_EN_BIT 6
264 #define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
265 #define MVPP2_GMAC_SA_LOW_OFFS 7
266 #define MVPP2_GMAC_CTRL_2_REG 0x8
267 #define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
268 #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
269 #define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
270 #define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
271 #define MVPP2_GMAC_AUTONEG_CONFIG 0xc
272 #define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
273 #define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
274 #define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
275 #define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
276 #define MVPP2_GMAC_AN_SPEED_EN BIT(7)
277 #define MVPP2_GMAC_FC_ADV_EN BIT(9)
278 #define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
279 #define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
280 #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
281 #define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
282 #define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
283 #define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
284 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
285
286 #define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
287
288 /* Descriptor ring Macros */
289 #define MVPP2_QUEUE_NEXT_DESC(q, index) \
290 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
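/* Minimal usage sketch (illustrative only; 'rxq' stands for any ring whose
 * last_desc == size - 1, e.g. a struct mvpp2_rx_queue defined below):
 *
 *	int idx = rxq->next_desc_to_proc;
 *	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, idx);
 *	// with a 4-entry ring the index walks 0 -> 1 -> 2 -> 3 -> 0
 */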
291
292 /* Various constants */
293
294 /* Coalescing */
295 #define MVPP2_TXDONE_COAL_PKTS_THRESH 15
296 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
297 #define MVPP2_RX_COAL_PKTS 32
298 #define MVPP2_RX_COAL_USEC 100
299
300 /* The two-byte Marvell header. It either contains a special value used
301  * by Marvell switches when a specific hardware mode is enabled (not
302  * supported by this driver) or is filled with zeroes automatically on
303  * the RX side. Because those two bytes sit at the front of the Ethernet
304  * header, the IP header that follows is automatically aligned on a
305  * 4-byte boundary: the hardware skips those two bytes on its
306  * own.
307  */
308 #define MVPP2_MH_SIZE 2
309 #define MVPP2_ETH_TYPE_LEN 2
310 #define MVPP2_PPPOE_HDR_SIZE 8
311 #define MVPP2_VLAN_TAG_LEN 4
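/* Worked example of the alignment property described above: the 2-byte
 * Marvell header plus the 14-byte Ethernet header (ETH_HLEN) add up to
 * 16 bytes, so the IP header that follows starts on a 4-byte boundary
 * (assuming the buffer itself starts 4-byte aligned, which NET_SKB_PAD
 * normally guarantees):
 *
 *	MVPP2_MH_SIZE + ETH_HLEN = 2 + 14 = 16,  16 % 4 == 0
 */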
312
313 /* Lbtd 802.3 type */
314 #define MVPP2_IP_LBDT_TYPE 0xfffa
315
316 #define MVPP2_TX_CSUM_MAX_SIZE 9800
317
318 /* Timeout constants */
319 #define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
320 #define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
321
322 #define MVPP2_TX_MTU_MAX 0x7ffff
323
324 /* Maximum number of T-CONTs of PON port */
325 #define MVPP2_MAX_TCONT 16
326
327 /* Maximum number of supported ports */
328 #define MVPP2_MAX_PORTS 4
329
330 /* Maximum number of TXQs used by single port */
331 #define MVPP2_MAX_TXQ 8
332
333 /* Maximum number of RXQs used by single port */
334 #define MVPP2_MAX_RXQ 8
335
336 /* Default number of RXQs in use */
337 #define MVPP2_DEFAULT_RXQ 4
338
339 /* Total number of RXQs available to all ports */
340 #define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
341
342 /* Max number of Rx descriptors */
343 #define MVPP2_MAX_RXD 128
344
345 /* Max number of Tx descriptors */
346 #define MVPP2_MAX_TXD 1024
347
348 /* Amount of Tx descriptors that can be reserved at once by CPU */
349 #define MVPP2_CPU_DESC_CHUNK 64
350
351 /* Max number of Tx descriptors in each aggregated queue */
352 #define MVPP2_AGGR_TXQ_SIZE 256
353
354 /* Descriptor aligned size */
355 #define MVPP2_DESC_ALIGNED_SIZE 32
356
357 /* Descriptor alignment mask */
358 #define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
359
360 /* RX FIFO constants */
361 #define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
362 #define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
363 #define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
364
365 /* RX buffer constants */
366 #define MVPP2_SKB_SHINFO_SIZE \
367 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
368
369 #define MVPP2_RX_PKT_SIZE(mtu) \
370 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
371 ETH_HLEN + ETH_FCS_LEN, cache_line_size())
372
373 #define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
374 #define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
375 #define MVPP2_RX_MAX_PKT_SIZE(total_size) \
376 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
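/* Worked example for the macros above (assuming a 64-byte cache line and
 * NET_SKB_PAD == 64; both are configuration dependent):
 *
 *	MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536
 *	MVPP2_RX_BUF_SIZE(1536) = 1536 + 64 = 1600
 *
 * MVPP2_RX_TOTAL_SIZE() then adds MVPP2_SKB_SHINFO_SIZE so the buffer can
 * later be turned into an skb without copying.
 */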
377
378 #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
379
380 /* IPv6 max L3 address size */
381 #define MVPP2_MAX_L3_ADDR_SIZE 16
382
383 /* Port flags */
384 #define MVPP2_F_LOOPBACK BIT(0)
385
386 /* Marvell tag types */
387 enum mvpp2_tag_type {
388 MVPP2_TAG_TYPE_NONE = 0,
389 MVPP2_TAG_TYPE_MH = 1,
390 MVPP2_TAG_TYPE_DSA = 2,
391 MVPP2_TAG_TYPE_EDSA = 3,
392 MVPP2_TAG_TYPE_VLAN = 4,
393 MVPP2_TAG_TYPE_LAST = 5
394 };
395
396 /* Parser constants */
397 #define MVPP2_PRS_TCAM_SRAM_SIZE 256
398 #define MVPP2_PRS_TCAM_WORDS 6
399 #define MVPP2_PRS_SRAM_WORDS 4
400 #define MVPP2_PRS_FLOW_ID_SIZE 64
401 #define MVPP2_PRS_FLOW_ID_MASK 0x3f
402 #define MVPP2_PRS_TCAM_ENTRY_INVALID 1
403 #define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
404 #define MVPP2_PRS_IPV4_HEAD 0x40
405 #define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
406 #define MVPP2_PRS_IPV4_MC 0xe0
407 #define MVPP2_PRS_IPV4_MC_MASK 0xf0
408 #define MVPP2_PRS_IPV4_BC_MASK 0xff
409 #define MVPP2_PRS_IPV4_IHL 0x5
410 #define MVPP2_PRS_IPV4_IHL_MASK 0xf
411 #define MVPP2_PRS_IPV6_MC 0xff
412 #define MVPP2_PRS_IPV6_MC_MASK 0xff
413 #define MVPP2_PRS_IPV6_HOP_MASK 0xff
414 #define MVPP2_PRS_TCAM_PROTO_MASK 0xff
415 #define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
416 #define MVPP2_PRS_DBL_VLANS_MAX 100
417
418 /* Tcam structure:
419 * - lookup ID - 4 bits
420 * - port ID - 1 byte
421 * - additional information - 1 byte
422 * - header data - 8 bytes
423 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
424 */
425 #define MVPP2_PRS_AI_BITS 8
426 #define MVPP2_PRS_PORT_MASK 0xff
427 #define MVPP2_PRS_LU_MASK 0xf
428 #define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
429 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
430 #define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
431 (((offs) * 2) - ((offs) % 2) + 2)
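/* Layout illustration derived from the two MVPP2_PRS_TCAM_DATA_BYTE*
 * macros above: data bytes and their enable bytes are interleaved in
 * pairs inside tcam.byte[]:
 *
 *	offs:		0   1   2   3   4   5
 *	data byte:	0   1   4   5   8   9
 *	enable byte:	2   3   6   7  10  11
 */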
432 #define MVPP2_PRS_TCAM_AI_BYTE 16
433 #define MVPP2_PRS_TCAM_PORT_BYTE 17
434 #define MVPP2_PRS_TCAM_LU_BYTE 20
435 #define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
436 #define MVPP2_PRS_TCAM_INV_WORD 5
437 /* Tcam entries ID */
438 #define MVPP2_PE_DROP_ALL 0
439 #define MVPP2_PE_FIRST_FREE_TID 1
440 #define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
441 #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
442 #define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
443 #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
444 #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
445 #define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
446 #define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
447 #define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
448 #define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
449 #define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
450 #define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
451 #define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
452 #define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
453 #define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
454 #define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
455 #define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
456 #define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
457 #define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
458 #define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
459 #define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
460 #define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
461 #define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
462 #define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
463 #define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
464 #define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
465
466 /* Sram structure
467 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
468 */
469 #define MVPP2_PRS_SRAM_RI_OFFS 0
470 #define MVPP2_PRS_SRAM_RI_WORD 0
471 #define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
472 #define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
473 #define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
474 #define MVPP2_PRS_SRAM_SHIFT_OFFS 64
475 #define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
476 #define MVPP2_PRS_SRAM_UDF_OFFS 73
477 #define MVPP2_PRS_SRAM_UDF_BITS 8
478 #define MVPP2_PRS_SRAM_UDF_MASK 0xff
479 #define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
480 #define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
481 #define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
482 #define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
483 #define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
484 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
485 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
486 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
487 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
488 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
489 #define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
490 #define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
491 #define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
492 #define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
493 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
494 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
495 #define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
496 #define MVPP2_PRS_SRAM_AI_OFFS 90
497 #define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
498 #define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
499 #define MVPP2_PRS_SRAM_AI_MASK 0xff
500 #define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
501 #define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
502 #define MVPP2_PRS_SRAM_LU_DONE_BIT 110
503 #define MVPP2_PRS_SRAM_LU_GEN_BIT 111
504
505 /* Sram result info bits assignment */
506 #define MVPP2_PRS_RI_MAC_ME_MASK 0x1
507 #define MVPP2_PRS_RI_DSA_MASK 0x2
508 #define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
509 #define MVPP2_PRS_RI_VLAN_NONE 0x0
510 #define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
511 #define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
512 #define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
513 #define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
514 #define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
515 #define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
516 #define MVPP2_PRS_RI_L2_UCAST 0x0
517 #define MVPP2_PRS_RI_L2_MCAST BIT(9)
518 #define MVPP2_PRS_RI_L2_BCAST BIT(10)
519 #define MVPP2_PRS_RI_PPPOE_MASK 0x800
520 #define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
521 #define MVPP2_PRS_RI_L3_UN 0x0
522 #define MVPP2_PRS_RI_L3_IP4 BIT(12)
523 #define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
524 #define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
525 #define MVPP2_PRS_RI_L3_IP6 BIT(14)
526 #define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
527 #define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
528 #define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
529 #define MVPP2_PRS_RI_L3_UCAST 0x0
530 #define MVPP2_PRS_RI_L3_MCAST BIT(15)
531 #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
532 #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
533 #define MVPP2_PRS_RI_UDF3_MASK 0x300000
534 #define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
535 #define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
536 #define MVPP2_PRS_RI_L4_TCP BIT(22)
537 #define MVPP2_PRS_RI_L4_UDP BIT(23)
538 #define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
539 #define MVPP2_PRS_RI_UDF7_MASK 0x60000000
540 #define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
541 #define MVPP2_PRS_RI_DROP_MASK 0x80000000
542
543 /* Sram additional info bits assignment */
544 #define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
545 #define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
546 #define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
547 #define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
548 #define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
549 #define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
550 #define MVPP2_PRS_SINGLE_VLAN_AI 0
551 #define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
552
553 /* DSA/EDSA type */
554 #define MVPP2_PRS_TAGGED true
555 #define MVPP2_PRS_UNTAGGED false
556 #define MVPP2_PRS_EDSA true
557 #define MVPP2_PRS_DSA false
558
559 /* MAC entries, shadow udf */
560 enum mvpp2_prs_udf {
561 MVPP2_PRS_UDF_MAC_DEF,
562 MVPP2_PRS_UDF_MAC_RANGE,
563 MVPP2_PRS_UDF_L2_DEF,
564 MVPP2_PRS_UDF_L2_DEF_COPY,
565 MVPP2_PRS_UDF_L2_USER,
566 };
567
568 /* Lookup ID */
569 enum mvpp2_prs_lookup {
570 MVPP2_PRS_LU_MH,
571 MVPP2_PRS_LU_MAC,
572 MVPP2_PRS_LU_DSA,
573 MVPP2_PRS_LU_VLAN,
574 MVPP2_PRS_LU_L2,
575 MVPP2_PRS_LU_PPPOE,
576 MVPP2_PRS_LU_IP4,
577 MVPP2_PRS_LU_IP6,
578 MVPP2_PRS_LU_FLOWS,
579 MVPP2_PRS_LU_LAST,
580 };
581
582 /* L3 cast enum */
583 enum mvpp2_prs_l3_cast {
584 MVPP2_PRS_L3_UNI_CAST,
585 MVPP2_PRS_L3_MULTI_CAST,
586 MVPP2_PRS_L3_BROAD_CAST
587 };
588
589 /* Classifier constants */
590 #define MVPP2_CLS_FLOWS_TBL_SIZE 512
591 #define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
592 #define MVPP2_CLS_LKP_TBL_SIZE 64
593
594 /* BM constants */
595 #define MVPP2_BM_POOLS_NUM 8
596 #define MVPP2_BM_LONG_BUF_NUM 1024
597 #define MVPP2_BM_SHORT_BUF_NUM 2048
598 #define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
599 #define MVPP2_BM_POOL_PTR_ALIGN 128
600 #define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
601 #define MVPP2_BM_SWF_SHORT_POOL 3
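/* Mapping sketch, following directly from the two macros above: ports 0-2
 * get long pools 0-2 respectively, any higher port shares long pool 2,
 * and every port uses pool 3 as its short pool:
 *
 *	MVPP2_BM_SWF_LONG_POOL(0) == 0,  MVPP2_BM_SWF_LONG_POOL(3) == 2
 *	MVPP2_BM_SWF_SHORT_POOL   == 3
 */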
602
603 /* BM cookie (32 bits) definition */
604 #define MVPP2_BM_COOKIE_POOL_OFFS 8
605 #define MVPP2_BM_COOKIE_CPU_OFFS 24
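/* Presumed cookie packing (only the two offsets are defined here, so the
 * exact layout below is an assumption for illustration): pool id in bits
 * [15:8], cpu id in bits [31:24]:
 *
 *	cookie = ((pool & 0xff) << MVPP2_BM_COOKIE_POOL_OFFS) |
 *		 ((cpu & 0xff) << MVPP2_BM_COOKIE_CPU_OFFS);
 */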
606
607 /* BM short pool packet size
608  * This value ensures that for SWF the total number
609  * of bytes allocated for each buffer is 512
610 */
611 #define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
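/* Sanity check of the 512-byte claim above (pure macro algebra):
 *
 *	MVPP2_RX_TOTAL_SIZE(MVPP2_RX_BUF_SIZE(MVPP2_BM_SHORT_PKT_SIZE))
 *	  = (512 - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
 *	    + NET_SKB_PAD + MVPP2_SKB_SHINFO_SIZE
 *	  = 512
 */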
612
613 enum mvpp2_bm_type {
614 MVPP2_BM_FREE,
615 MVPP2_BM_SWF_LONG,
616 MVPP2_BM_SWF_SHORT
617 };
618
619 /* Definitions */
620
621 /* Shared Packet Processor resources */
622 struct mvpp2 {
623 /* Shared registers' base addresses */
624 void __iomem *base;
625 void __iomem *lms_base;
626
627 /* Common clocks */
628 struct clk *pp_clk;
629 struct clk *gop_clk;
630
631 /* List of pointers to port structures */
632 struct mvpp2_port **port_list;
633
634 /* Aggregated TXQs */
635 struct mvpp2_tx_queue *aggr_txqs;
636
637 /* BM pools */
638 struct mvpp2_bm_pool *bm_pools;
639
640 /* PRS shadow table */
641 struct mvpp2_prs_shadow *prs_shadow;
642 /* PRS auxiliary table for double vlan entries control */
643 bool *prs_double_vlans;
644
645 /* Tclk value */
646 u32 tclk;
647
648 /* HW version */
649 enum { MVPP21, MVPP22 } hw_version;
650 };
651
652 struct mvpp2_pcpu_stats {
653 struct u64_stats_sync syncp;
654 u64 rx_packets;
655 u64 rx_bytes;
656 u64 tx_packets;
657 u64 tx_bytes;
658 };
659
660 /* Per-CPU port control */
661 struct mvpp2_port_pcpu {
662 struct hrtimer tx_done_timer;
663 bool timer_scheduled;
664 /* Tasklet for egress finalization */
665 struct tasklet_struct tx_done_tasklet;
666 };
667
668 struct mvpp2_port {
669 u8 id;
670
671 int irq;
672
673 struct mvpp2 *priv;
674
675 /* Per-port registers' base address */
676 void __iomem *base;
677
678 struct mvpp2_rx_queue **rxqs;
679 struct mvpp2_tx_queue **txqs;
680 struct net_device *dev;
681
682 int pkt_size;
683
684 u32 pending_cause_rx;
685 struct napi_struct napi;
686
687 /* Per-CPU port control */
688 struct mvpp2_port_pcpu __percpu *pcpu;
689
690 /* Flags */
691 unsigned long flags;
692
693 u16 tx_ring_size;
694 u16 rx_ring_size;
695 struct mvpp2_pcpu_stats __percpu *stats;
696
697 phy_interface_t phy_interface;
698 struct device_node *phy_node;
699 unsigned int link;
700 unsigned int duplex;
701 unsigned int speed;
702
703 struct mvpp2_bm_pool *pool_long;
704 struct mvpp2_bm_pool *pool_short;
705
706 /* Index of first port's physical RXQ */
707 u8 first_rxq;
708 };
709
710 /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
711  * layout of the transmit and receive DMA descriptors; this layout
712  * is therefore defined by the hardware design
713 */
714
715 #define MVPP2_TXD_L3_OFF_SHIFT 0
716 #define MVPP2_TXD_IP_HLEN_SHIFT 8
717 #define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
718 #define MVPP2_TXD_L4_CSUM_NOT BIT(14)
719 #define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
720 #define MVPP2_TXD_PADDING_DISABLE BIT(23)
721 #define MVPP2_TXD_L4_UDP BIT(24)
722 #define MVPP2_TXD_L3_IP6 BIT(26)
723 #define MVPP2_TXD_L_DESC BIT(28)
724 #define MVPP2_TXD_F_DESC BIT(29)
725
726 #define MVPP2_RXD_ERR_SUMMARY BIT(15)
727 #define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
728 #define MVPP2_RXD_ERR_CRC 0x0
729 #define MVPP2_RXD_ERR_OVERRUN BIT(13)
730 #define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
731 #define MVPP2_RXD_BM_POOL_ID_OFFS 16
732 #define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
733 #define MVPP2_RXD_HWF_SYNC BIT(21)
734 #define MVPP2_RXD_L4_CSUM_OK BIT(22)
735 #define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
736 #define MVPP2_RXD_L4_TCP BIT(25)
737 #define MVPP2_RXD_L4_UDP BIT(26)
738 #define MVPP2_RXD_L3_IP4 BIT(28)
739 #define MVPP2_RXD_L3_IP6 BIT(30)
740 #define MVPP2_RXD_BUF_HDR BIT(31)
741
742 /* HW TX descriptor for PPv2.1 */
743 struct mvpp21_tx_desc {
744 u32 command; /* Options used by HW for packet transmitting.*/
745 u8 packet_offset; /* the offset from the buffer beginning */
746 u8 phys_txq; /* destination queue ID */
747 u16 data_size; /* data size of transmitted packet in bytes */
748 u32 buf_dma_addr; /* physical addr of transmitted buffer */
749 u32 buf_cookie; /* cookie for access to TX buffer in tx path */
750 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
751 u32 reserved2; /* reserved (for future use) */
752 };
753
754 /* HW RX descriptor for PPv2.1 */
755 struct mvpp21_rx_desc {
756 u32 status; /* info about received packet */
757 u16 reserved1; /* parser_info (for future use, PnC) */
758 u16 data_size; /* size of received packet in bytes */
759 u32 buf_dma_addr; /* physical address of the buffer */
760 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
761 u16 reserved2; /* gem_port_id (for future use, PON) */
762 u16 reserved3; /* csum_l4 (for future use, PnC) */
763 u8 reserved4; /* bm_qset (for future use, BM) */
764 u8 reserved5;
765 u16 reserved6; /* classify_info (for future use, PnC) */
766 u32 reserved7; /* flow_id (for future use, PnC) */
767 u32 reserved8;
768 };
769
770 /* Opaque type used by the driver to manipulate the HW TX and RX
771 * descriptors
772 */
773 struct mvpp2_tx_desc {
774 union {
775 struct mvpp21_tx_desc pp21;
776 };
777 };
778
779 struct mvpp2_rx_desc {
780 union {
781 struct mvpp21_rx_desc pp21;
782 };
783 };
784
785 struct mvpp2_txq_pcpu_buf {
786 /* Transmitted SKB */
787 struct sk_buff *skb;
788
789 /* Physical address of transmitted buffer */
790 dma_addr_t dma;
791
792 /* Size transmitted */
793 size_t size;
794 };
795
796 /* Per-CPU Tx queue control */
797 struct mvpp2_txq_pcpu {
798 int cpu;
799
800 /* Number of Tx DMA descriptors in the descriptor ring */
801 int size;
802
803 /* Number of currently used Tx DMA descriptor in the
804 * descriptor ring
805 */
806 int count;
807
808 /* Number of Tx DMA descriptors reserved for each CPU */
809 int reserved_num;
810
811 /* Infos about transmitted buffers */
812 struct mvpp2_txq_pcpu_buf *buffs;
813
814 /* Index of last TX DMA descriptor that was inserted */
815 int txq_put_index;
816
817 /* Index of the TX DMA descriptor to be cleaned up */
818 int txq_get_index;
819 };
820
821 struct mvpp2_tx_queue {
822 /* Physical number of this Tx queue */
823 u8 id;
824
825 /* Logical number of this Tx queue */
826 u8 log_id;
827
828 /* Number of Tx DMA descriptors in the descriptor ring */
829 int size;
830
831 /* Number of currently used Tx DMA descriptor in the descriptor ring */
832 int count;
833
834 /* Per-CPU control of physical Tx queues */
835 struct mvpp2_txq_pcpu __percpu *pcpu;
836
837 u32 done_pkts_coal;
838
839 /* Virtual address of the Tx DMA descriptors array */
840 struct mvpp2_tx_desc *descs;
841
842 /* DMA address of the Tx DMA descriptors array */
843 dma_addr_t descs_dma;
844
845 /* Index of the last Tx DMA descriptor */
846 int last_desc;
847
848 /* Index of the next Tx DMA descriptor to process */
849 int next_desc_to_proc;
850 };
851
852 struct mvpp2_rx_queue {
853 /* RX queue number, in the range 0-31 for physical RXQs */
854 u8 id;
855
856 /* Num of rx descriptors in the rx descriptor ring */
857 int size;
858
859 u32 pkts_coal;
860 u32 time_coal;
861
862 /* Virtual address of the RX DMA descriptors array */
863 struct mvpp2_rx_desc *descs;
864
865 /* DMA address of the RX DMA descriptors array */
866 dma_addr_t descs_dma;
867
868 /* Index of the last RX DMA descriptor */
869 int last_desc;
870
871 /* Index of the next RX DMA descriptor to process */
872 int next_desc_to_proc;
873
874 /* ID of port to which physical RXQ is mapped */
875 int port;
876
877 /* Port's logical RXQ number to which physical RXQ is mapped */
878 int logic_rxq;
879 };
880
881 union mvpp2_prs_tcam_entry {
882 u32 word[MVPP2_PRS_TCAM_WORDS];
883 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
884 };
885
886 union mvpp2_prs_sram_entry {
887 u32 word[MVPP2_PRS_SRAM_WORDS];
888 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
889 };
890
891 struct mvpp2_prs_entry {
892 u32 index;
893 union mvpp2_prs_tcam_entry tcam;
894 union mvpp2_prs_sram_entry sram;
895 };
896
897 struct mvpp2_prs_shadow {
898 bool valid;
899 bool finish;
900
901 /* Lookup ID */
902 int lu;
903
904 /* User defined offset */
905 int udf;
906
907 /* Result info */
908 u32 ri;
909 u32 ri_mask;
910 };
911
912 struct mvpp2_cls_flow_entry {
913 u32 index;
914 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
915 };
916
917 struct mvpp2_cls_lookup_entry {
918 u32 lkpid;
919 u32 way;
920 u32 data;
921 };
922
923 struct mvpp2_bm_pool {
924 /* Pool number in the range 0-7 */
925 int id;
926 enum mvpp2_bm_type type;
927
928 /* Buffer Pointers Pool External (BPPE) size */
929 int size;
930 /* Number of buffers for this pool */
931 int buf_num;
932 /* Pool buffer size */
933 int buf_size;
934 /* Packet size */
935 int pkt_size;
936 int frag_size;
937
938 /* BPPE virtual base address */
939 u32 *virt_addr;
940 /* BPPE DMA base address */
941 dma_addr_t dma_addr;
942
943 /* Ports using BM pool */
944 u32 port_map;
945 };
946
947 /* Static declarations */
948
949 /* Number of RXQs used by single port */
950 static int rxq_number = MVPP2_DEFAULT_RXQ;
951 /* Number of TXQs used by single port */
952 static int txq_number = MVPP2_MAX_TXQ;
953
954 #define MVPP2_DRIVER_NAME "mvpp2"
955 #define MVPP2_DRIVER_VERSION "1.0"
956
957 /* Utility/helper methods */
958
959 static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
960 {
961 writel(data, priv->base + offset);
962 }
963
964 static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
965 {
966 return readl(priv->base + offset);
967 }
968
969 static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
970 struct mvpp2_tx_desc *tx_desc)
971 {
972 return tx_desc->pp21.buf_dma_addr;
973 }
974
975 static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
976 struct mvpp2_tx_desc *tx_desc,
977 dma_addr_t dma_addr)
978 {
979 tx_desc->pp21.buf_dma_addr = dma_addr;
980 }
981
982 static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
983 struct mvpp2_tx_desc *tx_desc)
984 {
985 return tx_desc->pp21.data_size;
986 }
987
988 static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
989 struct mvpp2_tx_desc *tx_desc,
990 size_t size)
991 {
992 tx_desc->pp21.data_size = size;
993 }
994
995 static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
996 struct mvpp2_tx_desc *tx_desc,
997 unsigned int txq)
998 {
999 tx_desc->pp21.phys_txq = txq;
1000 }
1001
1002 static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1003 struct mvpp2_tx_desc *tx_desc,
1004 unsigned int command)
1005 {
1006 tx_desc->pp21.command = command;
1007 }
1008
1009 static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
1010 struct mvpp2_tx_desc *tx_desc,
1011 unsigned int offset)
1012 {
1013 tx_desc->pp21.packet_offset = offset;
1014 }
1015
1016 static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
1017 struct mvpp2_tx_desc *tx_desc)
1018 {
1019 return tx_desc->pp21.packet_offset;
1020 }
1021
1022 static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1023 struct mvpp2_rx_desc *rx_desc)
1024 {
1025 return rx_desc->pp21.buf_dma_addr;
1026 }
1027
1028 static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1029 struct mvpp2_rx_desc *rx_desc)
1030 {
1031 return rx_desc->pp21.buf_cookie;
1032 }
1033
1034 static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1035 struct mvpp2_rx_desc *rx_desc)
1036 {
1037 return rx_desc->pp21.data_size;
1038 }
1039
1040 static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1041 struct mvpp2_rx_desc *rx_desc)
1042 {
1043 return rx_desc->pp21.status;
1044 }
1045
1046 static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1047 {
1048 txq_pcpu->txq_get_index++;
1049 if (txq_pcpu->txq_get_index == txq_pcpu->size)
1050 txq_pcpu->txq_get_index = 0;
1051 }
1052
1053 static void mvpp2_txq_inc_put(struct mvpp2_port *port,
1054 struct mvpp2_txq_pcpu *txq_pcpu,
1055 struct sk_buff *skb,
1056 struct mvpp2_tx_desc *tx_desc)
1057 {
1058 struct mvpp2_txq_pcpu_buf *tx_buf =
1059 txq_pcpu->buffs + txq_pcpu->txq_put_index;
1060 tx_buf->skb = skb;
1061 tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
1062 tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
1063 mvpp2_txdesc_offset_get(port, tx_desc);
1064 txq_pcpu->txq_put_index++;
1065 if (txq_pcpu->txq_put_index == txq_pcpu->size)
1066 txq_pcpu->txq_put_index = 0;
1067 }
1068
1069 /* Get the physical egress port number */
1070 static inline int mvpp2_egress_port(struct mvpp2_port *port)
1071 {
1072 return MVPP2_MAX_TCONT + port->id;
1073 }
1074
1075 /* Get the physical TXQ number */
1076 static inline int mvpp2_txq_phys(int port, int txq)
1077 {
1078 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1079 }
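/* Numbering sketch (illustrative only): with MVPP2_MAX_TCONT == 16 and
 * MVPP2_MAX_TXQ == 8, a port with id == 1 has egress port 16 + 1 = 17,
 * and its logical txq 2 maps to physical txq (16 + 1) * 8 + 2 = 138:
 *
 *	mvpp2_egress_port(port) == 17	// port->id == 1
 *	mvpp2_txq_phys(1, 2)    == 138
 */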
1080
1081 /* Parser configuration routines */
1082
1083 /* Update parser tcam and sram hw entries */
1084 static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1085 {
1086 int i;
1087
1088 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1089 return -EINVAL;
1090
1091 /* Clear entry invalidation bit */
1092 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1093
1094 /* Write tcam index - indirect access */
1095 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1096 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1097 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1098
1099 /* Write sram index - indirect access */
1100 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1101 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1102 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1103
1104 return 0;
1105 }
1106
1107 /* Read tcam entry from hw */
1108 static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1109 {
1110 int i;
1111
1112 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1113 return -EINVAL;
1114
1115 /* Write tcam index - indirect access */
1116 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1117
1118 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1119 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1120 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1121 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1122
1123 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1124 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1125
1126 /* Write sram index - indirect access */
1127 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1128 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1129 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1130
1131 return 0;
1132 }
1133
1134 /* Invalidate tcam hw entry */
1135 static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1136 {
1137 /* Write index - indirect access */
1138 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1139 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1140 MVPP2_PRS_TCAM_INV_MASK);
1141 }
1142
1143 /* Enable shadow table entry and set its lookup ID */
1144 static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1145 {
1146 priv->prs_shadow[index].valid = true;
1147 priv->prs_shadow[index].lu = lu;
1148 }
1149
1150 /* Update ri fields in shadow table entry */
1151 static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1152 unsigned int ri, unsigned int ri_mask)
1153 {
1154 priv->prs_shadow[index].ri_mask = ri_mask;
1155 priv->prs_shadow[index].ri = ri;
1156 }
1157
1158 /* Update lookup field in tcam sw entry */
1159 static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1160 {
1161 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1162
1163 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1164 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1165 }
1166
1167 /* Update mask for single port in tcam sw entry */
1168 static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1169 unsigned int port, bool add)
1170 {
1171 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1172
1173 if (add)
1174 pe->tcam.byte[enable_off] &= ~(1 << port);
1175 else
1176 pe->tcam.byte[enable_off] |= 1 << port;
1177 }
1178
1179 /* Update port map in tcam sw entry */
1180 static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1181 unsigned int ports)
1182 {
1183 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1184 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1185
1186 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1187 pe->tcam.byte[enable_off] &= ~port_mask;
1188 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1189 }
1190
1191 /* Obtain port map from tcam sw entry */
1192 static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1193 {
1194 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1195
1196 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1197 }
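/* Round-trip sketch for the inverted encoding used above: the port map is
 * stored negated in the enable byte, so set and get are symmetric:
 *
 *	mvpp2_prs_tcam_port_map_set(&pe, BIT(0) | BIT(2));
 *	// the enable byte now holds ~0x05 & 0xff == 0xfa
 *	mvpp2_prs_tcam_port_map_get(&pe);	// returns 0x05 again
 */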
1198
1199 /* Set byte of data and its enable bits in tcam sw entry */
1200 static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1201 unsigned int offs, unsigned char byte,
1202 unsigned char enable)
1203 {
1204 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1205 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1206 }
1207
1208 /* Get byte of data and its enable bits from tcam sw entry */
1209 static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1210 unsigned int offs, unsigned char *byte,
1211 unsigned char *enable)
1212 {
1213 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1214 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1215 }
1216
1217 /* Compare tcam data bytes with a pattern */
1218 static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1219 u16 data)
1220 {
1221 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1222 u16 tcam_data;
1223
1224 	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1225 if (tcam_data != data)
1226 return false;
1227 return true;
1228 }
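/* Comparison sketch, consistent with how callers such as
 * mvpp2_prs_vlan_find() below use this helper: the two data bytes are
 * stored most-significant byte first by mvpp2_prs_match_etype(), while
 * the 16-bit value rebuilt here is low byte first, hence the swab16():
 *
 *	mvpp2_prs_match_etype(&pe, 0, ETH_P_8021Q);	// bytes 0x81, 0x00
 *	mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(ETH_P_8021Q));	// true
 */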
1229
1230 /* Update ai bits in tcam sw entry */
1231 static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1232 unsigned int bits, unsigned int enable)
1233 {
1234 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1235
1236 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1237
1238 if (!(enable & BIT(i)))
1239 continue;
1240
1241 if (bits & BIT(i))
1242 pe->tcam.byte[ai_idx] |= 1 << i;
1243 else
1244 pe->tcam.byte[ai_idx] &= ~(1 << i);
1245 }
1246
1247 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1248 }
1249
1250 /* Get ai bits from tcam sw entry */
1251 static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1252 {
1253 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1254 }
1255
1256 /* Set ethertype in tcam sw entry */
1257 static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1258 unsigned short ethertype)
1259 {
1260 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1261 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1262 }
1263
1264 /* Set bits in sram sw entry */
1265 static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1266 int val)
1267 {
1268 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1269 }
1270
1271 /* Clear bits in sram sw entry */
1272 static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1273 int val)
1274 {
1275 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1276 }
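/* Addressing sketch, a direct consequence of MVPP2_BIT_TO_BYTE(): sram
 * bit numbers index the byte array 8 bits at a time, e.g.
 * MVPP2_PRS_SRAM_LU_GEN_BIT (bit 111) lands in byte 111 / 8 == 13 at bit
 * position 111 % 8 == 7:
 *
 *	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
 *	// pe->sram.byte[13] |= 1 << 7
 */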
1277
1278 /* Update ri bits in sram sw entry */
1279 static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1280 unsigned int bits, unsigned int mask)
1281 {
1282 unsigned int i;
1283
1284 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1285 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1286
1287 if (!(mask & BIT(i)))
1288 continue;
1289
1290 if (bits & BIT(i))
1291 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1292 else
1293 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1294
1295 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1296 }
1297 }
1298
1299 /* Obtain ri bits from sram sw entry */
1300 static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1301 {
1302 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1303 }
1304
1305 /* Update ai bits in sram sw entry */
1306 static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1307 unsigned int bits, unsigned int mask)
1308 {
1309 unsigned int i;
1310 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1311
1312 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1313
1314 if (!(mask & BIT(i)))
1315 continue;
1316
1317 if (bits & BIT(i))
1318 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1319 else
1320 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1321
1322 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1323 }
1324 }
1325
1326 /* Read ai bits from sram sw entry */
1327 static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1328 {
1329 u8 bits;
1330 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1331 int ai_en_off = ai_off + 1;
1332 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1333
1334 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1335 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1336
1337 return bits;
1338 }
1339
1340 /* In the sram sw entry, set the lookup ID field of the tcam key to be used
1341  * in the next lookup iteration
1342 */
1343 static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1344 unsigned int lu)
1345 {
1346 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1347
1348 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1349 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1350 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1351 }
1352
1353 /* In the sram sw entry set sign and value of the next lookup offset
1354 * and the offset value generated to the classifier
1355 */
1356 static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1357 unsigned int op)
1358 {
1359 /* Set sign */
1360 if (shift < 0) {
1361 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1362 shift = 0 - shift;
1363 } else {
1364 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1365 }
1366
1367 /* Set value */
1368 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1369 (unsigned char)shift;
1370
1371 /* Reset and set operation */
1372 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1373 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1374 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1375
1376 /* Set base offset as current */
1377 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1378 }
1379
1380 /* In the sram sw entry set sign and value of the user defined offset
1381 * generated to the classifier
1382 */
1383 static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1384 unsigned int type, int offset,
1385 unsigned int op)
1386 {
1387 /* Set sign */
1388 if (offset < 0) {
1389 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1390 offset = 0 - offset;
1391 } else {
1392 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1393 }
1394
1395 /* Set value */
1396 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1397 MVPP2_PRS_SRAM_UDF_MASK);
1398 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1399 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1400 MVPP2_PRS_SRAM_UDF_BITS)] &=
1401 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1402 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1403 MVPP2_PRS_SRAM_UDF_BITS)] |=
1404 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1405
1406 /* Set offset type */
1407 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1408 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1409 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1410
1411 /* Set offset operation */
1412 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1413 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1414 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1415
1416 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1417 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1418 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1419 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1420
1421 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1422 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1423 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1424
1425 /* Set base offset as current */
1426 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1427 }
1428
1429 /* Find parser flow entry */
1430 static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1431 {
1432 struct mvpp2_prs_entry *pe;
1433 int tid;
1434
1435 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1436 if (!pe)
1437 return NULL;
1438 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1439
1440 /* Go through all entries with MVPP2_PRS_LU_FLOWS */
1441 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1442 u8 bits;
1443
1444 if (!priv->prs_shadow[tid].valid ||
1445 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1446 continue;
1447
1448 pe->index = tid;
1449 mvpp2_prs_hw_read(priv, pe);
1450 bits = mvpp2_prs_sram_ai_get(pe);
1451
1452 /* Sram stores the classification lookup ID in AI bits [5:0] */
1453 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1454 return pe;
1455 }
1456 kfree(pe);
1457
1458 return NULL;
1459 }
1460
1461 /* Return first free tcam index, seeking from start to end */
1462 static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1463 unsigned char end)
1464 {
1465 int tid;
1466
1467 if (start > end)
1468 swap(start, end);
1469
1470 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1471 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1472
1473 for (tid = start; tid <= end; tid++) {
1474 if (!priv->prs_shadow[tid].valid)
1475 return tid;
1476 }
1477
1478 return -EINVAL;
1479 }
1480
1481 /* Enable/disable dropping of all MAC DAs */
1482 static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1483 {
1484 struct mvpp2_prs_entry pe;
1485
1486 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1487 /* Entry exist - update port only */
1488 pe.index = MVPP2_PE_DROP_ALL;
1489 mvpp2_prs_hw_read(priv, &pe);
1490 } else {
1491 /* Entry doesn't exist - create new */
1492 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1493 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1494 pe.index = MVPP2_PE_DROP_ALL;
1495
1496 /* Non-promiscuous mode for all ports - DROP unknown packets */
1497 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1498 MVPP2_PRS_RI_DROP_MASK);
1499
1500 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1501 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1502
1503 /* Update shadow table */
1504 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1505
1506 /* Mask all ports */
1507 mvpp2_prs_tcam_port_map_set(&pe, 0);
1508 }
1509
1510 /* Update port mask */
1511 mvpp2_prs_tcam_port_set(&pe, port, add);
1512
1513 mvpp2_prs_hw_write(priv, &pe);
1514 }
1515
1516 /* Set port to promiscuous mode */
1517 static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1518 {
1519 struct mvpp2_prs_entry pe;
1520
1521 /* Promiscuous mode - Accept unknown packets */
1522
1523 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1524 /* Entry exist - update port only */
1525 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1526 mvpp2_prs_hw_read(priv, &pe);
1527 } else {
1528 /* Entry doesn't exist - create new */
1529 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1530 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1531 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1532
1533 /* Continue - set next lookup */
1534 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1535
1536 /* Set result info bits */
1537 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1538 MVPP2_PRS_RI_L2_CAST_MASK);
1539
1540 /* Shift to ethertype */
1541 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1542 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1543
1544 /* Mask all ports */
1545 mvpp2_prs_tcam_port_map_set(&pe, 0);
1546
1547 /* Update shadow table */
1548 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1549 }
1550
1551 /* Update port mask */
1552 mvpp2_prs_tcam_port_set(&pe, port, add);
1553
1554 mvpp2_prs_hw_write(priv, &pe);
1555 }
1556
1557 /* Accept multicast */
1558 static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1559 bool add)
1560 {
1561 struct mvpp2_prs_entry pe;
1562 unsigned char da_mc;
1563
1564 	/* The first byte of an Ethernet multicast address is
1565 	 * 0x01 for IPv4 and 0x33 for IPv6
1566 */
1567 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1568
1569 if (priv->prs_shadow[index].valid) {
1570 /* Entry exist - update port only */
1571 pe.index = index;
1572 mvpp2_prs_hw_read(priv, &pe);
1573 } else {
1574 /* Entry doesn't exist - create new */
1575 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1576 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1577 pe.index = index;
1578
1579 /* Continue - set next lookup */
1580 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1581
1582 /* Set result info bits */
1583 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1584 MVPP2_PRS_RI_L2_CAST_MASK);
1585
1586 /* Update tcam entry data first byte */
1587 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1588
1589 /* Shift to ethertype */
1590 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1591 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1592
1593 /* Mask all ports */
1594 mvpp2_prs_tcam_port_map_set(&pe, 0);
1595
1596 /* Update shadow table */
1597 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1598 }
1599
1600 /* Update port mask */
1601 mvpp2_prs_tcam_port_set(&pe, port, add);
1602
1603 mvpp2_prs_hw_write(priv, &pe);
1604 }
1605
1606 /* Set entry for dsa packets */
1607 static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
1608 bool tagged, bool extend)
1609 {
1610 struct mvpp2_prs_entry pe;
1611 int tid, shift;
1612
1613 if (extend) {
1614 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
1615 shift = 8;
1616 } else {
1617 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
1618 shift = 4;
1619 }
1620
1621 if (priv->prs_shadow[tid].valid) {
1622 /* Entry exist - update port only */
1623 pe.index = tid;
1624 mvpp2_prs_hw_read(priv, &pe);
1625 } else {
1626 /* Entry doesn't exist - create new */
1627 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1628 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1629 pe.index = tid;
1630
1631 		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
1632 mvpp2_prs_sram_shift_set(&pe, shift,
1633 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1634
1635 /* Update shadow table */
1636 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1637
1638 if (tagged) {
1639 /* Set tagged bit in DSA tag */
1640 mvpp2_prs_tcam_data_byte_set(&pe, 0,
1641 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1642 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1643 /* Clear all ai bits for next iteration */
1644 mvpp2_prs_sram_ai_update(&pe, 0,
1645 MVPP2_PRS_SRAM_AI_MASK);
1646 /* If packet is tagged continue check vlans */
1647 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1648 } else {
1649 /* Set result info bits to 'no vlans' */
1650 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1651 MVPP2_PRS_RI_VLAN_MASK);
1652 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1653 }
1654
1655 /* Mask all ports */
1656 mvpp2_prs_tcam_port_map_set(&pe, 0);
1657 }
1658
1659 /* Update port mask */
1660 mvpp2_prs_tcam_port_set(&pe, port, add);
1661
1662 mvpp2_prs_hw_write(priv, &pe);
1663 }
1664
1665 /* Set entry for dsa ethertype */
1666 static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1667 bool add, bool tagged, bool extend)
1668 {
1669 struct mvpp2_prs_entry pe;
1670 int tid, shift, port_mask;
1671
1672 if (extend) {
1673 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1674 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1675 port_mask = 0;
1676 shift = 8;
1677 } else {
1678 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1679 MVPP2_PE_ETYPE_DSA_UNTAGGED;
1680 port_mask = MVPP2_PRS_PORT_MASK;
1681 shift = 4;
1682 }
1683
1684 if (priv->prs_shadow[tid].valid) {
1685 /* Entry exist - update port only */
1686 pe.index = tid;
1687 mvpp2_prs_hw_read(priv, &pe);
1688 } else {
1689 /* Entry doesn't exist - create new */
1690 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1691 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1692 pe.index = tid;
1693
1694 /* Set ethertype */
1695 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
1696 mvpp2_prs_match_etype(&pe, 2, 0);
1697
1698 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
1699 MVPP2_PRS_RI_DSA_MASK);
1700 /* Shift ethertype + 2 reserved bytes + tag */
1701 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
1702 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1703
1704 /* Update shadow table */
1705 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1706
1707 if (tagged) {
1708 /* Set tagged bit in DSA tag */
1709 mvpp2_prs_tcam_data_byte_set(&pe,
1710 MVPP2_ETH_TYPE_LEN + 2 + 3,
1711 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1712 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1713 /* Clear all ai bits for next iteration */
1714 mvpp2_prs_sram_ai_update(&pe, 0,
1715 MVPP2_PRS_SRAM_AI_MASK);
1716 /* If packet is tagged continue check vlans */
1717 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1718 } else {
1719 /* Set result info bits to 'no vlans' */
1720 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1721 MVPP2_PRS_RI_VLAN_MASK);
1722 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1723 }
1724 /* Mask/unmask all ports, depending on dsa type */
1725 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
1726 }
1727
1728 /* Update port mask */
1729 mvpp2_prs_tcam_port_set(&pe, port, add);
1730
1731 mvpp2_prs_hw_write(priv, &pe);
1732 }
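/* Editor's note: the extend flag also decides the initial port map here -
 * EDSA ethertype entries are created with all ports masked (placeholders),
 * while DSA ethertype entries are created unmasked for every port, matching
 * how mvpp2_prs_dsa_init() below registers them. A typical enable call, as
 * used there, is:
 *
 *	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
 *					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
 */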
1733
1734 /* Search for existing single/triple vlan entry */
1735 static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
1736 unsigned short tpid, int ai)
1737 {
1738 struct mvpp2_prs_entry *pe;
1739 int tid;
1740
1741 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1742 if (!pe)
1743 return NULL;
1744 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1745
1746 /* Go through all the entries with MVPP2_PRS_LU_VLAN */
1747 for (tid = MVPP2_PE_FIRST_FREE_TID;
1748 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1749 unsigned int ri_bits, ai_bits;
1750 bool match;
1751
1752 if (!priv->prs_shadow[tid].valid ||
1753 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1754 continue;
1755
1756 pe->index = tid;
1757
1758 mvpp2_prs_hw_read(priv, pe);
1759 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
1760 if (!match)
1761 continue;
1762
1763 /* Get vlan type */
1764 ri_bits = mvpp2_prs_sram_ri_get(pe);
1765 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1766
1767 /* Get current ai value from tcam */
1768 ai_bits = mvpp2_prs_tcam_ai_get(pe);
1769 /* Clear double vlan bit */
1770 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
1771
1772 if (ai != ai_bits)
1773 continue;
1774
1775 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1776 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1777 return pe;
1778 }
1779 kfree(pe);
1780
1781 return NULL;
1782 }
1783
1784 /* Add/update single/triple vlan entry */
1785 static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1786 unsigned int port_map)
1787 {
1788 struct mvpp2_prs_entry *pe;
1789 int tid_aux, tid;
1790 int ret = 0;
1791
1792 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
1793
1794 if (!pe) {
1795 /* Create new tcam entry */
1796 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
1797 MVPP2_PE_FIRST_FREE_TID);
1798 if (tid < 0)
1799 return tid;
1800
1801 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1802 if (!pe)
1803 return -ENOMEM;
1804
1805 /* Get last double vlan tid */
1806 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
1807 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
1808 unsigned int ri_bits;
1809
1810 if (!priv->prs_shadow[tid_aux].valid ||
1811 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1812 continue;
1813
1814 pe->index = tid_aux;
1815 mvpp2_prs_hw_read(priv, pe);
1816 ri_bits = mvpp2_prs_sram_ri_get(pe);
1817 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
1818 MVPP2_PRS_RI_VLAN_DOUBLE)
1819 break;
1820 }
1821
1822 if (tid <= tid_aux) {
1823 ret = -EINVAL;
1824 goto error;
1825 }
1826
1827 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1828 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1829 pe->index = tid;
1830
1831 mvpp2_prs_match_etype(pe, 0, tpid);
1832
1833 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
1834 /* Shift 4 bytes - skip 1 vlan tag */
1835 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
1836 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1837 /* Clear all ai bits for next iteration */
1838 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1839
1840 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
1841 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
1842 MVPP2_PRS_RI_VLAN_MASK);
1843 } else {
1844 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
1845 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
1846 MVPP2_PRS_RI_VLAN_MASK);
1847 }
1848 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
1849
1850 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
1851 }
1852 /* Update ports' mask */
1853 mvpp2_prs_tcam_port_map_set(pe, port_map);
1854
1855 mvpp2_prs_hw_write(priv, pe);
1856
1857 error:
1858 kfree(pe);
1859
1860 return ret;
1861 }
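/* Editor's note: the ordering checks in this function and in
 * mvpp2_prs_double_vlan_add() below suggest that lower TCAM indices take
 * lookup precedence. A frame carrying two tags also matches the single-tag
 * pattern, so double VLAN entries are kept at lower indices (allocated
 * upwards from MVPP2_PE_FIRST_FREE_TID) while single/triple entries are
 * allocated downwards from MVPP2_PE_LAST_FREE_TID; the -EINVAL above
 * rejects any placement that would land a single/triple entry at or below
 * the last double VLAN entry.
 */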
1862
1863 /* Get first free double vlan ai number */
1864 static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
1865 {
1866 int i;
1867
1868 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
1869 if (!priv->prs_double_vlans[i])
1870 return i;
1871 }
1872
1873 return -EINVAL;
1874 }
1875
1876 /* Search for existing double vlan entry */
1877 static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
1878 unsigned short tpid1,
1879 unsigned short tpid2)
1880 {
1881 struct mvpp2_prs_entry *pe;
1882 int tid;
1883
1884 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1885 if (!pe)
1886 return NULL;
1887 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1888
1889 /* Go through all the entries with MVPP2_PRS_LU_VLAN */
1890 for (tid = MVPP2_PE_FIRST_FREE_TID;
1891 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1892 unsigned int ri_mask;
1893 bool match;
1894
1895 if (!priv->prs_shadow[tid].valid ||
1896 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1897 continue;
1898
1899 pe->index = tid;
1900 mvpp2_prs_hw_read(priv, pe);
1901
1902 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
1903 && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
1904
1905 if (!match)
1906 continue;
1907
1908 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
1909 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
1910 return pe;
1911 }
1912 kfree(pe);
1913
1914 return NULL;
1915 }
1916
1917 /* Add or update double vlan entry */
1918 static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1919 unsigned short tpid2,
1920 unsigned int port_map)
1921 {
1922 struct mvpp2_prs_entry *pe;
1923 int tid_aux, tid, ai, ret = 0;
1924
1925 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
1926
1927 if (!pe) {
1928 /* Create new tcam entry */
1929 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1930 MVPP2_PE_LAST_FREE_TID);
1931 if (tid < 0)
1932 return tid;
1933
1934 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1935 if (!pe)
1936 return -ENOMEM;
1937
1938 /* Set ai value for new double vlan entry */
1939 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
1940 if (ai < 0) {
1941 ret = ai;
1942 goto error;
1943 }
1944
1945 /* Get first single/triple vlan tid */
1946 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
1947 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
1948 unsigned int ri_bits;
1949
1950 if (!priv->prs_shadow[tid_aux].valid ||
1951 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1952 continue;
1953
1954 pe->index = tid_aux;
1955 mvpp2_prs_hw_read(priv, pe);
1956 ri_bits = mvpp2_prs_sram_ri_get(pe);
1957 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1958 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1959 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1960 break;
1961 }
1962
1963 if (tid >= tid_aux) {
1964 ret = -ERANGE;
1965 goto error;
1966 }
1967
1968 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1969 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1970 pe->index = tid;
1971
1972 priv->prs_double_vlans[ai] = true;
1973
1974 mvpp2_prs_match_etype(pe, 0, tpid1);
1975 mvpp2_prs_match_etype(pe, 4, tpid2);
1976
1977 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
1978 /* Shift 8 bytes - skip 2 vlan tags */
1979 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
1980 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1981 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1982 MVPP2_PRS_RI_VLAN_MASK);
1983 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
1984 MVPP2_PRS_SRAM_AI_MASK);
1985
1986 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
1987 }
1988
1989 /* Update ports' mask */
1990 mvpp2_prs_tcam_port_map_set(pe, port_map);
1991 mvpp2_prs_hw_write(priv, pe);
1992
1993 error:
1994 kfree(pe);
1995 return ret;
1996 }
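/* Editor's note: mirror image of mvpp2_prs_vlan_add() above - the -ERANGE
 * check rejects a double VLAN entry that would land at or above the first
 * single/triple VLAN entry. Each double VLAN entry also claims a slot in
 * priv->prs_double_vlans[] and stores that index in the SRAM ai field
 * together with MVPP2_PRS_DBL_VLAN_AI_BIT, so the inner tag can be told
 * apart on the next VLAN lookup iteration.
 */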
1997
1998 /* IPv4 header parsing for fragmentation and L4 offset */
1999 static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2000 unsigned int ri, unsigned int ri_mask)
2001 {
2002 struct mvpp2_prs_entry pe;
2003 int tid;
2004
2005 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2006 (proto != IPPROTO_IGMP))
2007 return -EINVAL;
2008
2009 /* Fragmented packet */
2010 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2011 MVPP2_PE_LAST_FREE_TID);
2012 if (tid < 0)
2013 return tid;
2014
2015 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2016 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2017 pe.index = tid;
2018
2019 /* Set next lu to IPv4 */
2020 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2021 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2022 /* Set L4 offset */
2023 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2024 sizeof(struct iphdr) - 4,
2025 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2026 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2027 MVPP2_PRS_IPV4_DIP_AI_BIT);
2028 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
2029 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2030
2031 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2032 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2033 /* Unmask all ports */
2034 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2035
2036 /* Update shadow table and hw entry */
2037 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2038 mvpp2_prs_hw_write(priv, &pe);
2039
2040 /* Not fragmented packet */
2041 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2042 MVPP2_PE_LAST_FREE_TID);
2043 if (tid < 0)
2044 return tid;
2045
2046 pe.index = tid;
2047 /* Clear ri before updating */
2048 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2049 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2050 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2051
2052 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
2053 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
2054
2055 /* Update shadow table and hw entry */
2056 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2057 mvpp2_prs_hw_write(priv, &pe);
2058
2059 return 0;
2060 }
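/* Editor's note: two TCAM entries are written per L4 protocol. The first
 * only matches the protocol byte and marks the result info with
 * MVPP2_PRS_RI_IP_FRAG_MASK; the second reuses the same pe, clears just the
 * result-info words and additionally requires TCAM data bytes 2 and 3 to be
 * zero - these appear to correspond to the IPv4 flags/fragment-offset
 * field, i.e. the entry only matches non-fragmented packets. Typical usage,
 * as in mvpp2_prs_ip4_init() below:
 *
 *	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
 *				  MVPP2_PRS_RI_L4_PROTO_MASK);
 */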
2061
2062 /* IPv4 L3 multicast or broadcast */
2063 static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2064 {
2065 struct mvpp2_prs_entry pe;
2066 int mask, tid;
2067
2068 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2069 MVPP2_PE_LAST_FREE_TID);
2070 if (tid < 0)
2071 return tid;
2072
2073 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2074 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2075 pe.index = tid;
2076
2077 switch (l3_cast) {
2078 case MVPP2_PRS_L3_MULTI_CAST:
2079 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2080 MVPP2_PRS_IPV4_MC_MASK);
2081 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2082 MVPP2_PRS_RI_L3_ADDR_MASK);
2083 break;
2084 case MVPP2_PRS_L3_BROAD_CAST:
2085 mask = MVPP2_PRS_IPV4_BC_MASK;
2086 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2087 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2088 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2089 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2090 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2091 MVPP2_PRS_RI_L3_ADDR_MASK);
2092 break;
2093 default:
2094 return -EINVAL;
2095 }
2096
2097 /* Finished: go to flowid generation */
2098 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2099 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2100
2101 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2102 MVPP2_PRS_IPV4_DIP_AI_BIT);
2103 /* Unmask all ports */
2104 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2105
2106 /* Update shadow table and hw entry */
2107 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2108 mvpp2_prs_hw_write(priv, &pe);
2109
2110 return 0;
2111 }
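/* Editor's note: both cast cases run in the second IPv4 pass (they require
 * MVPP2_PRS_IPV4_DIP_AI_BIT to be set), where the TCAM data window appears
 * to sit on the destination address: multicast keys on the first DIP byte
 * via MVPP2_PRS_IPV4_MC/MVPP2_PRS_IPV4_MC_MASK, while broadcast requires
 * all four DIP bytes to match MVPP2_PRS_IPV4_BC_MASK exactly.
 */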
2112
2113 /* Set entries for protocols over IPv6 */
2114 static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2115 unsigned int ri, unsigned int ri_mask)
2116 {
2117 struct mvpp2_prs_entry pe;
2118 int tid;
2119
2120 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2121 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2122 return -EINVAL;
2123
2124 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2125 MVPP2_PE_LAST_FREE_TID);
2126 if (tid < 0)
2127 return tid;
2128
2129 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2130 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2131 pe.index = tid;
2132
2133 /* Finished: go to flowid generation */
2134 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2135 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2136 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2137 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2138 sizeof(struct ipv6hdr) - 6,
2139 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2140
2141 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2142 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2143 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2144 /* Unmask all ports */
2145 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2146
2147 /* Write HW */
2148 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2149 mvpp2_prs_hw_write(priv, &pe);
2150
2151 return 0;
2152 }
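/* Editor's note: unlike the IPv4 helper above, a single TCAM entry per L4
 * protocol is enough here; it runs on the IPv6 pass flagged by
 * MVPP2_PRS_IPV6_NO_EXT_AI_BIT, i.e. after the address-classification
 * entries have shifted the parse window back to the next-header field
 * (see the -18 byte shift in mvpp2_prs_ip6_cast() below). Typical usage,
 * as in mvpp2_prs_ip6_init() later in this file:
 *
 *	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
 *				  MVPP2_PRS_RI_L4_PROTO_MASK);
 */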
2153
2154 /* IPv6 L3 multicast entry */
2155 static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2156 {
2157 struct mvpp2_prs_entry pe;
2158 int tid;
2159
2160 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2161 return -EINVAL;
2162
2163 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2164 MVPP2_PE_LAST_FREE_TID);
2165 if (tid < 0)
2166 return tid;
2167
2168 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2169 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2170 pe.index = tid;
2171
2172 /* Finished: go to flowid generation */
2173 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2174 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2175 MVPP2_PRS_RI_L3_ADDR_MASK);
2176 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2177 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2178 /* Shift back to IPv6 NH */
2179 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2180
2181 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2182 MVPP2_PRS_IPV6_MC_MASK);
2183 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2184 /* Unmask all ports */
2185 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2186
2187 /* Update shadow table and hw entry */
2188 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2189 mvpp2_prs_hw_write(priv, &pe);
2190
2191 return 0;
2192 }
2193
2194 /* Parser per-port initialization */
2195 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2196 int lu_max, int offset)
2197 {
2198 u32 val;
2199
2200 /* Set lookup ID */
2201 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2202 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2203 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2204 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2205
2206 /* Set maximum number of loops for packet received from port */
2207 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2208 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2209 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2210 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2211
2212 /* Set initial offset for packet header extraction for the first
2213 * searching loop
2214 */
2215 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2216 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2217 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2218 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2219 }
2220
2221 /* Default flow entries initialization for all ports */
2222 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2223 {
2224 struct mvpp2_prs_entry pe;
2225 int port;
2226
2227 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2228 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2229 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2230 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2231
2232 /* Mask all ports */
2233 mvpp2_prs_tcam_port_map_set(&pe, 0);
2234
2235 /* Set flow ID */
2236 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2237 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2238
2239 /* Update shadow table and hw entry */
2240 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2241 mvpp2_prs_hw_write(priv, &pe);
2242 }
2243 }
2244
2245 /* Set default entry for Marvell Header field */
2246 static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2247 {
2248 struct mvpp2_prs_entry pe;
2249
2250 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2251
2252 pe.index = MVPP2_PE_MH_DEFAULT;
2253 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2254 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2255 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2256 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2257
2258 /* Unmask all ports */
2259 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2260
2261 /* Update shadow table and hw entry */
2262 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2263 mvpp2_prs_hw_write(priv, &pe);
2264 }
2265
2266 /* Set default entries (placeholders) for promiscuous, non-promiscuous and
2267 * multicast MAC addresses
2268 */
2269 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2270 {
2271 struct mvpp2_prs_entry pe;
2272
2273 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2274
2275 /* Non-promiscuous mode for all ports - DROP unknown packets */
2276 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2277 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2278
2279 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2280 MVPP2_PRS_RI_DROP_MASK);
2281 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2282 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2283
2284 /* Unmask all ports */
2285 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2286
2287 /* Update shadow table and hw entry */
2288 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2289 mvpp2_prs_hw_write(priv, &pe);
2290
2291 /* Placeholders only - no ports */
2292 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2293 mvpp2_prs_mac_promisc_set(priv, 0, false);
2294 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2295 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2296 }
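/* Editor's note: with the non-promiscuous default above, frames that do not
 * hit a more specific MAC entry are dropped. Per-port unicast/multicast
 * addresses are accepted later through mvpp2_prs_mac_da_accept(); the
 * drop-all, promiscuous and all-multicast entries created here start out as
 * placeholders with an empty port map until a port opts in.
 */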
2297
2298 /* Set default entries for various types of dsa packets */
2299 static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2300 {
2301 struct mvpp2_prs_entry pe;
2302
2303 /* Untagged EDSA entry - placeholder */
2304 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2305 MVPP2_PRS_EDSA);
2306
2307 /* Tagged EDSA entry - placeholder */
2308 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2309
2310 /* Untagged DSA entry - placeholder */
2311 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2312 MVPP2_PRS_DSA);
2313
2314 /* Tagged DSA entry - placeholder */
2315 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2316
2317 /* Untagged EDSA ethertype entry - placeholder */
2318 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2319 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2320
2321 /* Tagged EDSA ethertype entry - placeholder */
2322 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2323 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2324
2325 /* Untagged DSA ethertype entry */
2326 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2327 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2328
2329 /* Tagged DSA ethertype entry */
2330 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2331 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2332
2333 /* Set default entry, used when no DSA or EDSA tag is found */
2334 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2335 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2336 pe.index = MVPP2_PE_DSA_DEFAULT;
2337 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2338
2339 /* Shift 0 bytes */
2340 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2341 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2342
2343 /* Clear all sram ai bits for next iteration */
2344 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2345
2346 /* Unmask all ports */
2347 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2348
2349 mvpp2_prs_hw_write(priv, &pe);
2350 }
2351
2352 /* Match basic ethertypes */
2353 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2354 {
2355 struct mvpp2_prs_entry pe;
2356 int tid;
2357
2358 /* Ethertype: PPPoE */
2359 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2360 MVPP2_PE_LAST_FREE_TID);
2361 if (tid < 0)
2362 return tid;
2363
2364 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2365 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2366 pe.index = tid;
2367
2368 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2369
2370 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2371 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2372 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2373 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2374 MVPP2_PRS_RI_PPPOE_MASK);
2375
2376 /* Update shadow table and hw entry */
2377 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2378 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2379 priv->prs_shadow[pe.index].finish = false;
2380 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2381 MVPP2_PRS_RI_PPPOE_MASK);
2382 mvpp2_prs_hw_write(priv, &pe);
2383
2384 /* Ethertype: ARP */
2385 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2386 MVPP2_PE_LAST_FREE_TID);
2387 if (tid < 0)
2388 return tid;
2389
2390 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2391 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2392 pe.index = tid;
2393
2394 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2395
2396 /* Generate flow in the next iteration */
2397 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2398 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2399 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2400 MVPP2_PRS_RI_L3_PROTO_MASK);
2401 /* Set L3 offset */
2402 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2403 MVPP2_ETH_TYPE_LEN,
2404 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2405
2406 /* Update shadow table and hw entry */
2407 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2408 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2409 priv->prs_shadow[pe.index].finish = true;
2410 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2411 MVPP2_PRS_RI_L3_PROTO_MASK);
2412 mvpp2_prs_hw_write(priv, &pe);
2413
2414 /* Ethertype: LBTD */
2415 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2416 MVPP2_PE_LAST_FREE_TID);
2417 if (tid < 0)
2418 return tid;
2419
2420 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2421 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2422 pe.index = tid;
2423
2424 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2425
2426 /* Generate flow in the next iteration */
2427 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2428 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2429 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2430 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2431 MVPP2_PRS_RI_CPU_CODE_MASK |
2432 MVPP2_PRS_RI_UDF3_MASK);
2433 /* Set L3 offset */
2434 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2435 MVPP2_ETH_TYPE_LEN,
2436 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2437
2438 /* Update shadow table and hw entry */
2439 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2440 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2441 priv->prs_shadow[pe.index].finish = true;
2442 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2443 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2444 MVPP2_PRS_RI_CPU_CODE_MASK |
2445 MVPP2_PRS_RI_UDF3_MASK);
2446 mvpp2_prs_hw_write(priv, &pe);
2447
2448 /* Ethertype: IPv4 without options */
2449 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2450 MVPP2_PE_LAST_FREE_TID);
2451 if (tid < 0)
2452 return tid;
2453
2454 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2455 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2456 pe.index = tid;
2457
2458 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2459 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2460 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2461 MVPP2_PRS_IPV4_HEAD_MASK |
2462 MVPP2_PRS_IPV4_IHL_MASK);
2463
2464 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2465 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2466 MVPP2_PRS_RI_L3_PROTO_MASK);
2467 /* Skip eth_type + 4 bytes of IP header */
2468 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2469 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2470 /* Set L3 offset */
2471 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2472 MVPP2_ETH_TYPE_LEN,
2473 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2474
2475 /* Update shadow table and hw entry */
2476 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2477 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2478 priv->prs_shadow[pe.index].finish = false;
2479 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2480 MVPP2_PRS_RI_L3_PROTO_MASK);
2481 mvpp2_prs_hw_write(priv, &pe);
2482
2483 /* Ethertype: IPv4 with options */
2484 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2485 MVPP2_PE_LAST_FREE_TID);
2486 if (tid < 0)
2487 return tid;
2488
2489 pe.index = tid;
2490
2491 /* Clear tcam data before updating */
2492 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2493 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2494
2495 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2496 MVPP2_PRS_IPV4_HEAD,
2497 MVPP2_PRS_IPV4_HEAD_MASK);
2498
2499 /* Clear ri before updating */
2500 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2501 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2502 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2503 MVPP2_PRS_RI_L3_PROTO_MASK);
2504
2505 /* Update shadow table and hw entry */
2506 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2507 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2508 priv->prs_shadow[pe.index].finish = false;
2509 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2510 MVPP2_PRS_RI_L3_PROTO_MASK);
2511 mvpp2_prs_hw_write(priv, &pe);
2512
2513 /* Ethertype: IPv6 without options */
2514 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2515 MVPP2_PE_LAST_FREE_TID);
2516 if (tid < 0)
2517 return tid;
2518
2519 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2520 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2521 pe.index = tid;
2522
2523 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2524
2525 /* Skip DIP of IPV6 header */
2526 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2527 MVPP2_MAX_L3_ADDR_SIZE,
2528 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2529 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2530 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2531 MVPP2_PRS_RI_L3_PROTO_MASK);
2532 /* Set L3 offset */
2533 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2534 MVPP2_ETH_TYPE_LEN,
2535 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2536
2537 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2538 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2539 priv->prs_shadow[pe.index].finish = false;
2540 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2541 MVPP2_PRS_RI_L3_PROTO_MASK);
2542 mvpp2_prs_hw_write(priv, &pe);
2543
2544 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2545 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2546 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2547 pe.index = MVPP2_PE_ETH_TYPE_UN;
2548
2549 /* Unmask all ports */
2550 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2551
2552 /* Generate flow in the next iteration */
2553 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2554 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2555 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2556 MVPP2_PRS_RI_L3_PROTO_MASK);
2557 /* Set L3 offset even if it's an unknown L3 */
2558 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2559 MVPP2_ETH_TYPE_LEN,
2560 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2561
2562 /* Update shadow table and hw entry */
2563 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2564 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2565 priv->prs_shadow[pe.index].finish = true;
2566 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2567 MVPP2_PRS_RI_L3_PROTO_MASK);
2568 mvpp2_prs_hw_write(priv, &pe);
2569
2570 return 0;
2571 }
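/* Editor's note: the "IPv4 with options" entry above deliberately reuses the
 * pe contents left over from the "IPv4 without options" entry - only the
 * IHL match byte and the result-info words are cleared and rewritten, so the
 * remaining TCAM/SRAM fields (ethertype match, shift, L3 offset) are
 * inherited unchanged. The same reuse pattern appears again in
 * mvpp2_prs_pppoe_init() below.
 */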
2572
2573 /* Configure vlan entries and detect up to 2 successive VLAN tags.
2574 * Possible options:
2575 * 0x8100, 0x88A8
2576 * 0x8100, 0x8100
2577 * 0x8100
2578 * 0x88A8
2579 */
2580 static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2581 {
2582 struct mvpp2_prs_entry pe;
2583 int err;
2584
2585 priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
2586 MVPP2_PRS_DBL_VLANS_MAX,
2587 sizeof(bool), GFP_KERNEL);
2588 if (!priv->prs_double_vlans)
2589 return -ENOMEM;
2590
2591 /* Double VLAN: 0x8100, 0x88A8 */
2592 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2593 MVPP2_PRS_PORT_MASK);
2594 if (err)
2595 return err;
2596
2597 /* Double VLAN: 0x8100, 0x8100 */
2598 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2599 MVPP2_PRS_PORT_MASK);
2600 if (err)
2601 return err;
2602
2603 /* Single VLAN: 0x88a8 */
2604 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2605 MVPP2_PRS_PORT_MASK);
2606 if (err)
2607 return err;
2608
2609 /* Single VLAN: 0x8100 */
2610 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2611 MVPP2_PRS_PORT_MASK);
2612 if (err)
2613 return err;
2614
2615 /* Set default double vlan entry */
2616 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2617 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2618 pe.index = MVPP2_PE_VLAN_DBL;
2619
2620 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2621 /* Clear ai for next iterations */
2622 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2623 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2624 MVPP2_PRS_RI_VLAN_MASK);
2625
2626 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2627 MVPP2_PRS_DBL_VLAN_AI_BIT);
2628 /* Unmask all ports */
2629 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2630
2631 /* Update shadow table and hw entry */
2632 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2633 mvpp2_prs_hw_write(priv, &pe);
2634
2635 /* Set default vlan none entry */
2636 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2637 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2638 pe.index = MVPP2_PE_VLAN_NONE;
2639
2640 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2641 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2642 MVPP2_PRS_RI_VLAN_MASK);
2643
2644 /* Unmask all ports */
2645 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2646
2647 /* Update shadow table and hw entry */
2648 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2649 mvpp2_prs_hw_write(priv, &pe);
2650
2651 return 0;
2652 }
2653
2654 /* Set entries for PPPoE ethertype */
2655 static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2656 {
2657 struct mvpp2_prs_entry pe;
2658 int tid;
2659
2660 /* IPv4 over PPPoE with options */
2661 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2662 MVPP2_PE_LAST_FREE_TID);
2663 if (tid < 0)
2664 return tid;
2665
2666 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2667 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2668 pe.index = tid;
2669
2670 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2671
2672 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2673 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2674 MVPP2_PRS_RI_L3_PROTO_MASK);
2675 /* Skip eth_type + 4 bytes of IP header */
2676 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2677 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2678 /* Set L3 offset */
2679 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2680 MVPP2_ETH_TYPE_LEN,
2681 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2682
2683 /* Update shadow table and hw entry */
2684 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2685 mvpp2_prs_hw_write(priv, &pe);
2686
2687 /* IPv4 over PPPoE without options */
2688 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2689 MVPP2_PE_LAST_FREE_TID);
2690 if (tid < 0)
2691 return tid;
2692
2693 pe.index = tid;
2694
2695 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2696 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2697 MVPP2_PRS_IPV4_HEAD_MASK |
2698 MVPP2_PRS_IPV4_IHL_MASK);
2699
2700 /* Clear ri before updating */
2701 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2702 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2703 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2704 MVPP2_PRS_RI_L3_PROTO_MASK);
2705
2706 /* Update shadow table and hw entry */
2707 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2708 mvpp2_prs_hw_write(priv, &pe);
2709
2710 /* IPv6 over PPPoE */
2711 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2712 MVPP2_PE_LAST_FREE_TID);
2713 if (tid < 0)
2714 return tid;
2715
2716 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2717 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2718 pe.index = tid;
2719
2720 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2721
2722 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2723 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2724 MVPP2_PRS_RI_L3_PROTO_MASK);
2725 /* Skip eth_type + 4 bytes of IPv6 header */
2726 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2727 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2728 /* Set L3 offset */
2729 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2730 MVPP2_ETH_TYPE_LEN,
2731 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2732
2733 /* Update shadow table and hw entry */
2734 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2735 mvpp2_prs_hw_write(priv, &pe);
2736
2737 /* Non-IP over PPPoE */
2738 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2739 MVPP2_PE_LAST_FREE_TID);
2740 if (tid < 0)
2741 return tid;
2742
2743 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2744 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2745 pe.index = tid;
2746
2747 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2748 MVPP2_PRS_RI_L3_PROTO_MASK);
2749
2750 /* Finished: go to flowid generation */
2751 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2752 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2753 /* Set L3 offset even if it's unknown L3 */
2754 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2755 MVPP2_ETH_TYPE_LEN,
2756 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2757
2758 /* Update shadow table and hw entry */
2759 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2760 mvpp2_prs_hw_write(priv, &pe);
2761
2762 return 0;
2763 }
2764
2765 /* Initialize entries for IPv4 */
2766 static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2767 {
2768 struct mvpp2_prs_entry pe;
2769 int err;
2770
2771 /* Set entries for TCP, UDP and IGMP over IPv4 */
2772 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
2773 MVPP2_PRS_RI_L4_PROTO_MASK);
2774 if (err)
2775 return err;
2776
2777 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
2778 MVPP2_PRS_RI_L4_PROTO_MASK);
2779 if (err)
2780 return err;
2781
2782 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
2783 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2784 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2785 MVPP2_PRS_RI_CPU_CODE_MASK |
2786 MVPP2_PRS_RI_UDF3_MASK);
2787 if (err)
2788 return err;
2789
2790 /* IPv4 Broadcast */
2791 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
2792 if (err)
2793 return err;
2794
2795 /* IPv4 Multicast */
2796 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2797 if (err)
2798 return err;
2799
2800 /* Default IPv4 entry for unknown protocols */
2801 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2802 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2803 pe.index = MVPP2_PE_IP4_PROTO_UN;
2804
2805 /* Set next lu to IPv4 */
2806 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2807 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2808 /* Set L4 offset */
2809 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2810 sizeof(struct iphdr) - 4,
2811 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2812 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2813 MVPP2_PRS_IPV4_DIP_AI_BIT);
2814 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2815 MVPP2_PRS_RI_L4_PROTO_MASK);
2816
2817 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2818 /* Unmask all ports */
2819 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2820
2821 /* Update shadow table and hw entry */
2822 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2823 mvpp2_prs_hw_write(priv, &pe);
2824
2825 /* Default IPv4 entry for unicast address */
2826 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2827 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2828 pe.index = MVPP2_PE_IP4_ADDR_UN;
2829
2830 /* Finished: go to flowid generation */
2831 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2832 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2833 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2834 MVPP2_PRS_RI_L3_ADDR_MASK);
2835
2836 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2837 MVPP2_PRS_IPV4_DIP_AI_BIT);
2838 /* Unmask all ports */
2839 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2840
2841 /* Update shadow table and hw entry */
2842 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2843 mvpp2_prs_hw_write(priv, &pe);
2844
2845 return 0;
2846 }
2847
2848 /* Initialize entries for IPv6 */
2849 static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
2850 {
2851 struct mvpp2_prs_entry pe;
2852 int tid, err;
2853
2854 /* Set entries for TCP, UDP and ICMP over IPv6 */
2855 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
2856 MVPP2_PRS_RI_L4_TCP,
2857 MVPP2_PRS_RI_L4_PROTO_MASK);
2858 if (err)
2859 return err;
2860
2861 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
2862 MVPP2_PRS_RI_L4_UDP,
2863 MVPP2_PRS_RI_L4_PROTO_MASK);
2864 if (err)
2865 return err;
2866
2867 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
2868 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2869 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2870 MVPP2_PRS_RI_CPU_CODE_MASK |
2871 MVPP2_PRS_RI_UDF3_MASK);
2872 if (err)
2873 return err;
2874
2875 /* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */
2876 /* Result Info: UDF7=1, DS lite */
2877 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
2878 MVPP2_PRS_RI_UDF7_IP6_LITE,
2879 MVPP2_PRS_RI_UDF7_MASK);
2880 if (err)
2881 return err;
2882
2883 /* IPv6 multicast */
2884 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2885 if (err)
2886 return err;
2887
2888 /* Entry for checking hop limit */
2889 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2890 MVPP2_PE_LAST_FREE_TID);
2891 if (tid < 0)
2892 return tid;
2893
2894 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2895 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2896 pe.index = tid;
2897
2898 /* Finished: go to flowid generation */
2899 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2900 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2901 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
2902 MVPP2_PRS_RI_DROP_MASK,
2903 MVPP2_PRS_RI_L3_PROTO_MASK |
2904 MVPP2_PRS_RI_DROP_MASK);
2905
2906 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
2907 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2908 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2909
2910 /* Update shadow table and hw entry */
2911 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2912 mvpp2_prs_hw_write(priv, &pe);
2913
2914 /* Default IPv6 entry for unknown protocols */
2915 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2916 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2917 pe.index = MVPP2_PE_IP6_PROTO_UN;
2918
2919 /* Finished: go to flowid generation */
2920 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2921 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2922 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2923 MVPP2_PRS_RI_L4_PROTO_MASK);
2924 /* Set L4 offset relative to our current place */
2925 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2926 sizeof(struct ipv6hdr) - 4,
2927 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2928
2929 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2930 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2931 /* Unmask all ports */
2932 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2933
2934 /* Update shadow table and hw entry */
2935 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2936 mvpp2_prs_hw_write(priv, &pe);
2937
2938 /* Default IPv6 entry for unknown ext protocols */
2939 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2940 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2941 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
2942
2943 /* Finished: go to flowid generation */
2944 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2945 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2946 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2947 MVPP2_PRS_RI_L4_PROTO_MASK);
2948
2949 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
2950 MVPP2_PRS_IPV6_EXT_AI_BIT);
2951 /* Unmask all ports */
2952 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2953
2954 /* Update shadow table and hw entry */
2955 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2956 mvpp2_prs_hw_write(priv, &pe);
2957
2958 /* Default IPv6 entry for unicast address */
2959 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2960 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2961 pe.index = MVPP2_PE_IP6_ADDR_UN;
2962
2963 /* Finished: go to IPv6 again */
2964 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2965 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2966 MVPP2_PRS_RI_L3_ADDR_MASK);
2967 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2968 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2969 /* Shift back to IPV6 NH */
2970 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2971
2972 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2973 /* Unmask all ports */
2974 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2975
2976 /* Update shadow table and hw entry */
2977 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2978 mvpp2_prs_hw_write(priv, &pe);
2979
2980 return 0;
2981 }
2982
2983 /* Parser default initialization */
2984 static int mvpp2_prs_default_init(struct platform_device *pdev,
2985 struct mvpp2 *priv)
2986 {
2987 int err, index, i;
2988
2989 /* Enable tcam table */
2990 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2991
2992 /* Clear all tcam and sram entries */
2993 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2994 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2995 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2996 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2997
2998 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2999 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3000 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
3001 }
3002
3003 /* Invalidate all tcam entries */
3004 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
3005 mvpp2_prs_hw_inv(priv, index);
3006
3007 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
3008 sizeof(struct mvpp2_prs_shadow),
3009 GFP_KERNEL);
3010 if (!priv->prs_shadow)
3011 return -ENOMEM;
3012
3013 /* Always start from lookup = 0 */
3014 for (index = 0; index < MVPP2_MAX_PORTS; index++)
3015 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
3016 MVPP2_PRS_PORT_LU_MAX, 0);
3017
3018 mvpp2_prs_def_flow_init(priv);
3019
3020 mvpp2_prs_mh_init(priv);
3021
3022 mvpp2_prs_mac_init(priv);
3023
3024 mvpp2_prs_dsa_init(priv);
3025
3026 err = mvpp2_prs_etype_init(priv);
3027 if (err)
3028 return err;
3029
3030 err = mvpp2_prs_vlan_init(pdev, priv);
3031 if (err)
3032 return err;
3033
3034 err = mvpp2_prs_pppoe_init(priv);
3035 if (err)
3036 return err;
3037
3038 err = mvpp2_prs_ip6_init(priv);
3039 if (err)
3040 return err;
3041
3042 err = mvpp2_prs_ip4_init(priv);
3043 if (err)
3044 return err;
3045
3046 return 0;
3047 }
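/* Editor's note (sketch, assuming the usual probe flow): the parser setup
 * above is expected to run once, before any port is brought up, e.g.
 * somewhere on the platform probe path:
 *
 *	err = mvpp2_prs_default_init(pdev, priv);
 *	if (err < 0)
 *		return err;
 *
 * The actual call site lives later in this file, outside the portion shown
 * here, and may differ slightly.
 */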
3048
3049 /* Compare MAC DA with tcam entry data */
3050 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3051 const u8 *da, unsigned char *mask)
3052 {
3053 unsigned char tcam_byte, tcam_mask;
3054 int index;
3055
3056 for (index = 0; index < ETH_ALEN; index++) {
3057 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3058 if (tcam_mask != mask[index])
3059 return false;
3060
3061 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3062 return false;
3063 }
3064
3065 return true;
3066 }
3067
3068 /* Find tcam entry with matched pair <MAC DA, port> */
3069 static struct mvpp2_prs_entry *
3070 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3071 unsigned char *mask, int udf_type)
3072 {
3073 struct mvpp2_prs_entry *pe;
3074 int tid;
3075
3076 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3077 if (!pe)
3078 return NULL;
3079 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3080
3081 /* Go through all the entries with MVPP2_PRS_LU_MAC */
3082 for (tid = MVPP2_PE_FIRST_FREE_TID;
3083 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3084 unsigned int entry_pmap;
3085
3086 if (!priv->prs_shadow[tid].valid ||
3087 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3088 (priv->prs_shadow[tid].udf != udf_type))
3089 continue;
3090
3091 pe->index = tid;
3092 mvpp2_prs_hw_read(priv, pe);
3093 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3094
3095 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3096 entry_pmap == pmap)
3097 return pe;
3098 }
3099 kfree(pe);
3100
3101 return NULL;
3102 }
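/* Editor's note: the lookup helpers that return a struct mvpp2_prs_entry *
 * (this one, mvpp2_prs_vlan_find() and mvpp2_prs_double_vlan_find()) hand
 * back a kzalloc'ed entry that the caller owns; mvpp2_prs_mac_da_accept()
 * below, for example, kfree()s it on every return path.
 */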
3103
3104 /* Update parser's mac da entry */
3105 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3106 const u8 *da, bool add)
3107 {
3108 struct mvpp2_prs_entry *pe;
3109 unsigned int pmap, len, ri;
3110 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3111 int tid;
3112
3113 /* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
3114 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3115 MVPP2_PRS_UDF_MAC_DEF);
3116
3117 /* No such entry */
3118 if (!pe) {
3119 if (!add)
3120 return 0;
3121
3122 /* Create new TCAM entry */
3123 /* Find first range mac entry */
3124 for (tid = MVPP2_PE_FIRST_FREE_TID;
3125 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3126 if (priv->prs_shadow[tid].valid &&
3127 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3128 (priv->prs_shadow[tid].udf ==
3129 MVPP2_PRS_UDF_MAC_RANGE))
3130 break;
3131
3132 /* Go through all the entries from first to last */
3133 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3134 tid - 1);
3135 if (tid < 0)
3136 return tid;
3137
3138 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3139 if (!pe)
3140 return -ENOMEM;
3141 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3142 pe->index = tid;
3143
3144 /* Mask all ports */
3145 mvpp2_prs_tcam_port_map_set(pe, 0);
3146 }
3147
3148 /* Update port mask */
3149 mvpp2_prs_tcam_port_set(pe, port, add);
3150
3151 /* Invalidate the entry if no ports are left enabled */
3152 pmap = mvpp2_prs_tcam_port_map_get(pe);
3153 if (pmap == 0) {
3154 if (add) {
3155 kfree(pe);
3156 return -EINVAL;
3157 }
3158 mvpp2_prs_hw_inv(priv, pe->index);
3159 priv->prs_shadow[pe->index].valid = false;
3160 kfree(pe);
3161 return 0;
3162 }
3163
3164 /* Continue - set next lookup */
3165 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3166
3167 /* Set match on DA */
3168 len = ETH_ALEN;
3169 while (len--)
3170 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3171
3172 /* Set result info bits */
3173 if (is_broadcast_ether_addr(da))
3174 ri = MVPP2_PRS_RI_L2_BCAST;
3175 else if (is_multicast_ether_addr(da))
3176 ri = MVPP2_PRS_RI_L2_MCAST;
3177 else
3178 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3179
3180 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3181 MVPP2_PRS_RI_MAC_ME_MASK);
3182 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3183 MVPP2_PRS_RI_MAC_ME_MASK);
3184
3185 /* Shift to ethertype */
3186 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3187 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3188
3189 /* Update shadow table and hw entry */
3190 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3191 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3192 mvpp2_prs_hw_write(priv, pe);
3193
3194 kfree(pe);
3195
3196 return 0;
3197 }
3198
3199 static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3200 {
3201 struct mvpp2_port *port = netdev_priv(dev);
3202 int err;
3203
3204 /* Remove old parser entry */
3205 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3206 false);
3207 if (err)
3208 return err;
3209
3210 /* Add new parser entry */
3211 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3212 if (err)
3213 return err;
3214
3215 /* Set addr in the device */
3216 ether_addr_copy(dev->dev_addr, da);
3217
3218 return 0;
3219 }
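/* Editor's note (assumption): this helper has the shape of a
 * .ndo_set_mac_address backend - remove the parser entry for the old
 * address, install one for the new address, then update dev->dev_addr. A
 * hypothetical wrapper would look like:
 *
 *	static int mvpp2_set_mac_address(struct net_device *dev, void *p)
 *	{
 *		const struct sockaddr *addr = p;
 *
 *		if (!is_valid_ether_addr(addr->sa_data))
 *			return -EADDRNOTAVAIL;
 *		return mvpp2_prs_update_mac_da(dev, addr->sa_data);
 *	}
 *
 * (The real ndo implementation lives later in this file and may differ.)
 */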
3220
3221 /* Delete all port's multicast simple (not range) entries */
3222 static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3223 {
3224 struct mvpp2_prs_entry pe;
3225 int index, tid;
3226
3227 for (tid = MVPP2_PE_FIRST_FREE_TID;
3228 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3229 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3230
3231 if (!priv->prs_shadow[tid].valid ||
3232 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3233 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3234 continue;
3235
3236 /* Only simple mac entries */
3237 pe.index = tid;
3238 mvpp2_prs_hw_read(priv, &pe);
3239
3240 /* Read mac addr from entry */
3241 for (index = 0; index < ETH_ALEN; index++)
3242 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3243 &da_mask[index]);
3244
3245 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3246 /* Delete this entry */
3247 mvpp2_prs_mac_da_accept(priv, port, da, false);
3248 }
3249 }
3250
3251 static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3252 {
3253 switch (type) {
3254 case MVPP2_TAG_TYPE_EDSA:
3255 /* Add port to EDSA entries */
3256 mvpp2_prs_dsa_tag_set(priv, port, true,
3257 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3258 mvpp2_prs_dsa_tag_set(priv, port, true,
3259 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3260 /* Remove port from DSA entries */
3261 mvpp2_prs_dsa_tag_set(priv, port, false,
3262 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3263 mvpp2_prs_dsa_tag_set(priv, port, false,
3264 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3265 break;
3266
3267 case MVPP2_TAG_TYPE_DSA:
3268 /* Add port to DSA entries */
3269 mvpp2_prs_dsa_tag_set(priv, port, true,
3270 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3271 mvpp2_prs_dsa_tag_set(priv, port, true,
3272 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3273 /* Remove port from EDSA entries */
3274 mvpp2_prs_dsa_tag_set(priv, port, false,
3275 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3276 mvpp2_prs_dsa_tag_set(priv, port, false,
3277 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3278 break;
3279
3280 case MVPP2_TAG_TYPE_MH:
3281 case MVPP2_TAG_TYPE_NONE:
3282 /* Remove port from EDSA and DSA entries */
3283 mvpp2_prs_dsa_tag_set(priv, port, false,
3284 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3285 mvpp2_prs_dsa_tag_set(priv, port, false,
3286 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3287 mvpp2_prs_dsa_tag_set(priv, port, false,
3288 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3289 mvpp2_prs_dsa_tag_set(priv, port, false,
3290 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3291 break;
3292
3293 default:
3294 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3295 return -EINVAL;
3296 }
3297
3298 return 0;
3299 }
3300
3301 /* Set prs flow for the port */
3302 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3303 {
3304 struct mvpp2_prs_entry *pe;
3305 int tid;
3306
3307 pe = mvpp2_prs_flow_find(port->priv, port->id);
3308
3309 /* No such entry exists yet */
3310 if (!pe) {
3311 /* Go through all the entries from last to first */
3312 tid = mvpp2_prs_tcam_first_free(port->priv,
3313 MVPP2_PE_LAST_FREE_TID,
3314 MVPP2_PE_FIRST_FREE_TID);
3315 if (tid < 0)
3316 return tid;
3317
3318 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3319 if (!pe)
3320 return -ENOMEM;
3321
3322 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3323 pe->index = tid;
3324
3325 /* Set flow ID */
3326 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3327 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3328
3329 /* Update shadow table */
3330 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3331 }
3332
3333 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3334 mvpp2_prs_hw_write(port->priv, pe);
3335 kfree(pe);
3336
3337 return 0;
3338 }
3339
3340 /* Classifier configuration routines */
3341
3342 /* Update classification flow table registers */
3343 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3344 struct mvpp2_cls_flow_entry *fe)
3345 {
3346 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3347 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3348 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3349 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3350 }
3351
3352 /* Update classification lookup table register */
3353 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3354 struct mvpp2_cls_lookup_entry *le)
3355 {
3356 u32 val;
3357
3358 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3359 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3360 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3361 }
3362
3363 /* Classifier default initialization */
3364 static void mvpp2_cls_init(struct mvpp2 *priv)
3365 {
3366 struct mvpp2_cls_lookup_entry le;
3367 struct mvpp2_cls_flow_entry fe;
3368 int index;
3369
3370 /* Enable classifier */
3371 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3372
3373 /* Clear classifier flow table */
3374 memset(&fe.data, 0, sizeof(fe.data));
3375 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3376 fe.index = index;
3377 mvpp2_cls_flow_write(priv, &fe);
3378 }
3379
3380 /* Clear classifier lookup table */
3381 le.data = 0;
3382 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3383 le.lkpid = index;
3384 le.way = 0;
3385 mvpp2_cls_lookup_write(priv, &le);
3386
3387 le.way = 1;
3388 mvpp2_cls_lookup_write(priv, &le);
3389 }
3390 }
3391
3392 static void mvpp2_cls_port_config(struct mvpp2_port *port)
3393 {
3394 struct mvpp2_cls_lookup_entry le;
3395 u32 val;
3396
3397 /* Set way for the port */
3398 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3399 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3400 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3401
3402 /* Pick the entry to be accessed in the lookup ID decoding table
3403 * according to the way and lkpid.
3404 */
3405 le.lkpid = port->id;
3406 le.way = 0;
3407 le.data = 0;
3408
3409 /* Set initial CPU queue for receiving packets */
3410 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3411 le.data |= port->first_rxq;
3412
3413 /* Disable classification engines */
3414 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3415
3416 /* Update lookup ID table entry */
3417 mvpp2_cls_lookup_write(port->priv, &le);
3418 }
3419
3420 /* Set CPU queue number for oversize packets */
3421 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3422 {
3423 u32 val;
3424
3425 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3426 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3427
3428 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3429 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3430
3431 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3432 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3433 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3434 }
3435
3436 static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
3437 {
3438 if (likely(pool->frag_size <= PAGE_SIZE))
3439 return netdev_alloc_frag(pool->frag_size);
3440 else
3441 return kmalloc(pool->frag_size, GFP_ATOMIC);
3442 }
3443
3444 static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
3445 {
3446 if (likely(pool->frag_size <= PAGE_SIZE))
3447 skb_free_frag(data);
3448 else
3449 kfree(data);
3450 }
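/* Editor's note: buffers are backed by page-fragment allocations whenever
 * frag_size fits in a page (typically the case for standard MTUs) and fall
 * back to kmalloc() with GFP_ATOMIC otherwise; allocation and free must
 * therefore stay paired through these two helpers rather than calling
 * kfree()/skb_free_frag() directly.
 */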
3451
3452 /* Buffer Manager configuration routines */
3453
3454 /* Create pool */
3455 static int mvpp2_bm_pool_create(struct platform_device *pdev,
3456 struct mvpp2 *priv,
3457 struct mvpp2_bm_pool *bm_pool, int size)
3458 {
3459 int size_bytes;
3460 u32 val;
3461
3462 size_bytes = sizeof(u32) * size;
3463 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
3464 &bm_pool->dma_addr,
3465 GFP_KERNEL);
3466 if (!bm_pool->virt_addr)
3467 return -ENOMEM;
3468
3469 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
3470 MVPP2_BM_POOL_PTR_ALIGN)) {
3471 dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
3472 bm_pool->dma_addr);
3473 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3474 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3475 return -ENOMEM;
3476 }
3477
3478 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3479 bm_pool->dma_addr);
3480 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3481
3482 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3483 val |= MVPP2_BM_START_MASK;
3484 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3485
3486 bm_pool->type = MVPP2_BM_FREE;
3487 bm_pool->size = size;
3488 bm_pool->pkt_size = 0;
3489 bm_pool->buf_num = 0;
3490
3491 return 0;
3492 }
3493
3494 /* Set pool buffer size */
3495 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3496 struct mvpp2_bm_pool *bm_pool,
3497 int buf_size)
3498 {
3499 u32 val;
3500
3501 bm_pool->buf_size = buf_size;
3502
3503 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3504 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3505 }
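/*
 * Illustrative note, not part of the driver: the value written to
 * MVPP2_POOL_BUF_SIZE_REG is the buffer size rounded up to the register
 * granularity of (1 << MVPP2_POOL_BUF_SIZE_OFFSET) bytes. Assuming a
 * 32-byte granularity, a hypothetical 1518-byte buffer would be programmed
 * as:
 *
 *	ALIGN(1518, 32) == 1536
 */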
3506
3507 /* Free all buffers from the pool */
3508 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3509 struct mvpp2_bm_pool *bm_pool)
3510 {
3511 int i;
3512
3513 for (i = 0; i < bm_pool->buf_num; i++) {
3514 dma_addr_t buf_dma_addr;
3515 phys_addr_t buf_phys_addr;
3516 void *data;
3517
3518 buf_dma_addr = mvpp2_read(priv,
3519 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3520 buf_phys_addr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
3521
3522 dma_unmap_single(dev, buf_dma_addr,
3523 bm_pool->buf_size, DMA_FROM_DEVICE);
3524
3525 data = (void *)phys_to_virt(buf_phys_addr);
3526 if (!data)
3527 break;
3528
3529 mvpp2_frag_free(bm_pool, data);
3530 }
3531
3532 /* Update BM driver with number of buffers removed from pool */
3533 bm_pool->buf_num -= i;
3534 }
3535
3536 /* Cleanup pool */
3537 static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3538 struct mvpp2 *priv,
3539 struct mvpp2_bm_pool *bm_pool)
3540 {
3541 u32 val;
3542
3543 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
3544 if (bm_pool->buf_num) {
3545 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3546 return 0;
3547 }
3548
3549 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3550 val |= MVPP2_BM_STOP_MASK;
3551 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3552
3553 dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
3554 bm_pool->virt_addr,
3555 bm_pool->dma_addr);
3556 return 0;
3557 }
3558
3559 static int mvpp2_bm_pools_init(struct platform_device *pdev,
3560 struct mvpp2 *priv)
3561 {
3562 int i, err, size;
3563 struct mvpp2_bm_pool *bm_pool;
3564
3565 /* Create all pools with maximum size */
3566 size = MVPP2_BM_POOL_SIZE_MAX;
3567 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3568 bm_pool = &priv->bm_pools[i];
3569 bm_pool->id = i;
3570 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3571 if (err)
3572 goto err_unroll_pools;
3573 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3574 }
3575 return 0;
3576
3577 err_unroll_pools:
3578 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3579 for (i = i - 1; i >= 0; i--)
3580 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3581 return err;
3582 }
3583
3584 static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3585 {
3586 int i, err;
3587
3588 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3589 /* Mask all BM interrupts */
3590 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3591 /* Clear BM cause register */
3592 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3593 }
3594
3595 /* Allocate and initialize BM pools */
3596 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3597 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
3598 if (!priv->bm_pools)
3599 return -ENOMEM;
3600
3601 err = mvpp2_bm_pools_init(pdev, priv);
3602 if (err < 0)
3603 return err;
3604 return 0;
3605 }
3606
3607 /* Attach long pool to rxq */
3608 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3609 int lrxq, int long_pool)
3610 {
3611 u32 val;
3612 int prxq;
3613
3614 /* Get queue physical ID */
3615 prxq = port->rxqs[lrxq]->id;
3616
3617 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3618 val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3619 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
3620 MVPP2_RXQ_POOL_LONG_MASK);
3621
3622 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3623 }
3624
3625 /* Attach short pool to rxq */
3626 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3627 int lrxq, int short_pool)
3628 {
3629 u32 val;
3630 int prxq;
3631
3632 /* Get queue physical ID */
3633 prxq = port->rxqs[lrxq]->id;
3634
3635 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3636 val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3637 val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
3638 MVPP2_RXQ_POOL_SHORT_MASK);
3639
3640 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3641 }
3642
3643 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
3644 struct mvpp2_bm_pool *bm_pool,
3645 dma_addr_t *buf_dma_addr,
3646 phys_addr_t *buf_phys_addr,
3647 gfp_t gfp_mask)
3648 {
3649 dma_addr_t dma_addr;
3650 void *data;
3651
3652 data = mvpp2_frag_alloc(bm_pool);
3653 if (!data)
3654 return NULL;
3655
3656 dma_addr = dma_map_single(port->dev->dev.parent, data,
3657 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3658 DMA_FROM_DEVICE);
3659 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3660 mvpp2_frag_free(bm_pool, data);
3661 return NULL;
3662 }
3663 *buf_dma_addr = dma_addr;
3664 *buf_phys_addr = virt_to_phys(data);
3665
3666 return data;
3667 }
3668
3669 /* Set pool number in a BM cookie */
3670 static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3671 {
3672 u32 bm;
3673
3674 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3675 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3676
3677 return bm;
3678 }
3679
3680 /* Get pool number from a BM cookie */
3681 static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
3682 {
3683 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
3684 }
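/*
 * Illustrative sketch, not part of the driver: the BM cookie packs the pool
 * id and the CPU id into one 32-bit word, each as an 8-bit field at
 * MVPP2_BM_COOKIE_POOL_OFFS and MVPP2_BM_COOKIE_CPU_OFFS. Assuming
 * hypothetical offsets of 8 and 24, a cookie for pool 2 built on CPU 1
 * would look like:
 *
 *	cookie = (2 & 0xFF) << 8 | (1 & 0xFF) << 24;	== 0x01000200
 *	mvpp2_bm_cookie_pool_get(cookie)		== 2
 */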
3685
3686 /* Release buffer to BM */
3687 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3688 dma_addr_t buf_dma_addr,
3689 phys_addr_t buf_phys_addr)
3690 {
3691 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
3692 * returned in the "cookie" field of the RX
3693 * descriptor. Instead of storing the virtual address, we
3694 * store the physical address
3695 */
3696 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
3697 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
3698 }
3699
3700 /* Refill BM pool */
3701 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
3702 dma_addr_t dma_addr,
3703 phys_addr_t phys_addr)
3704 {
3705 int pool = mvpp2_bm_cookie_pool_get(bm);
3706
3707 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3708 }
3709
3710 /* Allocate buffers for the pool */
3711 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3712 struct mvpp2_bm_pool *bm_pool, int buf_num)
3713 {
3714 int i, buf_size, total_size;
3715 dma_addr_t dma_addr;
3716 phys_addr_t phys_addr;
3717 void *buf;
3718
3719 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3720 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3721
3722 if (buf_num < 0 ||
3723 (buf_num + bm_pool->buf_num > bm_pool->size)) {
3724 netdev_err(port->dev,
3725 "cannot allocate %d buffers for pool %d\n",
3726 buf_num, bm_pool->id);
3727 return 0;
3728 }
3729
3730 for (i = 0; i < buf_num; i++) {
3731 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
3732 &phys_addr, GFP_KERNEL);
3733 if (!buf)
3734 break;
3735
3736 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
3737 phys_addr);
3738 }
3739
3740 /* Update BM driver with number of buffers added to pool */
3741 bm_pool->buf_num += i;
3742
3743 netdev_dbg(port->dev,
3744 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
3745 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3746 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
3747
3748 netdev_dbg(port->dev,
3749 "%s pool %d: %d of %d buffers added\n",
3750 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3751 bm_pool->id, i, buf_num);
3752 return i;
3753 }
3754
3755 /* Notify the driver that a BM pool is being used as a specific type and return
3756 * the pool pointer on success
3757 */
3758 static struct mvpp2_bm_pool *
3759 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3760 int pkt_size)
3761 {
3762 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3763 int num;
3764
3765 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
3766 netdev_err(port->dev, "mixing pool types is forbidden\n");
3767 return NULL;
3768 }
3769
3770 if (new_pool->type == MVPP2_BM_FREE)
3771 new_pool->type = type;
3772
3773 /* Allocate buffers in case BM pool is used as long pool, but packet
3774 * size doesn't match MTU or the BM pool hasn't been used yet
3775 */
3776 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
3777 (new_pool->pkt_size == 0)) {
3778 int pkts_num;
3779
3780 /* Set default buffer number or free all the buffers in case
3781 * the pool is not empty
3782 */
3783 pkts_num = new_pool->buf_num;
3784 if (pkts_num == 0)
3785 pkts_num = type == MVPP2_BM_SWF_LONG ?
3786 MVPP2_BM_LONG_BUF_NUM :
3787 MVPP2_BM_SHORT_BUF_NUM;
3788 else
3789 mvpp2_bm_bufs_free(port->dev->dev.parent,
3790 port->priv, new_pool);
3791
3792 new_pool->pkt_size = pkt_size;
3793 new_pool->frag_size =
3794 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
3795 MVPP2_SKB_SHINFO_SIZE;
3796
3797 /* Allocate buffers for this pool */
3798 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
3799 if (num != pkts_num) {
3800 WARN(1, "pool %d: %d of %d allocated\n",
3801 new_pool->id, num, pkts_num);
3802 return NULL;
3803 }
3804 }
3805
3806 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3807 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3808
3809 return new_pool;
3810 }
3811
3812 /* Initialize pools for software forwarding (swf) */
3813 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3814 {
3815 int rxq;
3816
3817 if (!port->pool_long) {
3818 port->pool_long =
3819 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
3820 MVPP2_BM_SWF_LONG,
3821 port->pkt_size);
3822 if (!port->pool_long)
3823 return -ENOMEM;
3824
3825 port->pool_long->port_map |= (1 << port->id);
3826
3827 for (rxq = 0; rxq < rxq_number; rxq++)
3828 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
3829 }
3830
3831 if (!port->pool_short) {
3832 port->pool_short =
3833 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
3834 MVPP2_BM_SWF_SHORT,
3835 MVPP2_BM_SHORT_PKT_SIZE);
3836 if (!port->pool_short)
3837 return -ENOMEM;
3838
3839 port->pool_short->port_map |= (1 << port->id);
3840
3841 for (rxq = 0; rxq < rxq_number; rxq++)
3842 mvpp2_rxq_short_pool_set(port, rxq,
3843 port->pool_short->id);
3844 }
3845
3846 return 0;
3847 }
3848
3849 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
3850 {
3851 struct mvpp2_port *port = netdev_priv(dev);
3852 struct mvpp2_bm_pool *port_pool = port->pool_long;
3853 int num, pkts_num = port_pool->buf_num;
3854 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3855
3856 /* Update BM pool with new buffer size */
3857 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
3858 if (port_pool->buf_num) {
3859 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
3860 return -EIO;
3861 }
3862
3863 port_pool->pkt_size = pkt_size;
3864 port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
3865 MVPP2_SKB_SHINFO_SIZE;
3866 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
3867 if (num != pkts_num) {
3868 WARN(1, "pool %d: %d of %d allocated\n",
3869 port_pool->id, num, pkts_num);
3870 return -EIO;
3871 }
3872
3873 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
3874 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
3875 dev->mtu = mtu;
3876 netdev_update_features(dev);
3877 return 0;
3878 }
3879
3880 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
3881 {
3882 int cpu, cpu_mask = 0;
3883
3884 for_each_present_cpu(cpu)
3885 cpu_mask |= 1 << cpu;
3886 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3887 MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
3888 }
3889
3890 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
3891 {
3892 int cpu, cpu_mask = 0;
3893
3894 for_each_present_cpu(cpu)
3895 cpu_mask |= 1 << cpu;
3896 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3897 MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
3898 }
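/*
 * Illustrative example, not part of the driver: both helpers above build a
 * bitmask with one bit per present CPU before writing the ISR enable
 * register. On a hypothetical 4-CPU system the loop produces:
 *
 *	cpu_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3);	== 0xf
 */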
3899
3900 /* Mask the current CPU's Rx/Tx interrupts */
3901 static void mvpp2_interrupts_mask(void *arg)
3902 {
3903 struct mvpp2_port *port = arg;
3904
3905 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
3906 }
3907
3908 /* Unmask the current CPU's Rx/Tx interrupts */
3909 static void mvpp2_interrupts_unmask(void *arg)
3910 {
3911 struct mvpp2_port *port = arg;
3912
3913 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3914 (MVPP2_CAUSE_MISC_SUM_MASK |
3915 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3916 }
3917
3918 /* Port configuration routines */
3919
3920 static void mvpp2_port_mii_set(struct mvpp2_port *port)
3921 {
3922 u32 val;
3923
3924 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3925
3926 switch (port->phy_interface) {
3927 case PHY_INTERFACE_MODE_SGMII:
3928 val |= MVPP2_GMAC_INBAND_AN_MASK;
3929 break;
3930 case PHY_INTERFACE_MODE_RGMII:
3931 val |= MVPP2_GMAC_PORT_RGMII_MASK; /* fall through */
3932 default:
3933 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
3934 }
3935
3936 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3937 }
3938
3939 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
3940 {
3941 u32 val;
3942
3943 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3944 val |= MVPP2_GMAC_FC_ADV_EN;
3945 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3946 }
3947
3948 static void mvpp2_port_enable(struct mvpp2_port *port)
3949 {
3950 u32 val;
3951
3952 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3953 val |= MVPP2_GMAC_PORT_EN_MASK;
3954 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3955 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3956 }
3957
3958 static void mvpp2_port_disable(struct mvpp2_port *port)
3959 {
3960 u32 val;
3961
3962 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3963 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
3964 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3965 }
3966
3967 /* Disable periodic Xon packet transmission (IEEE 802.3x Flow Control) */
3968 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
3969 {
3970 u32 val;
3971
3972 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
3973 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3974 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3975 }
3976
3977 /* Configure loopback port */
3978 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
3979 {
3980 u32 val;
3981
3982 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3983
3984 if (port->speed == 1000)
3985 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
3986 else
3987 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
3988
3989 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
3990 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
3991 else
3992 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
3993
3994 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3995 }
3996
3997 static void mvpp2_port_reset(struct mvpp2_port *port)
3998 {
3999 u32 val;
4000
4001 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4002 ~MVPP2_GMAC_PORT_RESET_MASK;
4003 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4004
4005 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4006 MVPP2_GMAC_PORT_RESET_MASK)
4007 continue;
4008 }
4009
4010 /* Change maximum receive size of the port */
4011 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
4012 {
4013 u32 val;
4014
4015 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4016 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
4017 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
4018 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
4019 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4020 }
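/*
 * Illustrative example, not part of the driver: the MAX_RX_SIZE field is
 * programmed in 2-byte units after stripping the Marvell header, i.e.
 * (pkt_size - MVPP2_MH_SIZE) / 2. Assuming a hypothetical pkt_size of 1518
 * and a 2-byte Marvell header:
 *
 *	(1518 - 2) / 2 == 758
 */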
4021
4022 /* Set defaults to the MVPP2 port */
4023 static void mvpp2_defaults_set(struct mvpp2_port *port)
4024 {
4025 int tx_port_num, val, queue, ptxq, lrxq;
4026
4027 /* Configure port to loopback if needed */
4028 if (port->flags & MVPP2_F_LOOPBACK)
4029 mvpp2_port_loopback_set(port);
4030
4031 /* Update TX FIFO MIN Threshold */
4032 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4033 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
4034 /* Min. TX threshold must be less than minimal packet length */
4035 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
4036 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4037
4038 /* Disable Legacy WRR, Disable EJP, Release from reset */
4039 tx_port_num = mvpp2_egress_port(port);
4040 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
4041 tx_port_num);
4042 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
4043
4044 /* Close bandwidth for all queues */
4045 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
4046 ptxq = mvpp2_txq_phys(port->id, queue);
4047 mvpp2_write(port->priv,
4048 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
4049 }
4050
4051 /* Set refill period to 1 usec, refill tokens
4052 * and bucket size to maximum
4053 */
4054 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
4055 port->priv->tclk / USEC_PER_SEC);
4056 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
4057 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
4058 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
4059 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
4060 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
4061 val = MVPP2_TXP_TOKEN_SIZE_MAX;
4062 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4063
4064 /* Set MaximumLowLatencyPacketSize value to 256 */
4065 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
4066 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
4067 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
4068
4069 /* Enable Rx cache snoop */
4070 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4071 queue = port->rxqs[lrxq]->id;
4072 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4073 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
4074 MVPP2_SNOOP_BUF_HDR_MASK;
4075 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4076 }
4077
4078 /* By default, disable interrupts for all present CPUs */
4079 mvpp2_interrupts_disable(port);
4080 }
4081
4082 /* Enable/disable receiving packets */
4083 static void mvpp2_ingress_enable(struct mvpp2_port *port)
4084 {
4085 u32 val;
4086 int lrxq, queue;
4087
4088 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4089 queue = port->rxqs[lrxq]->id;
4090 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4091 val &= ~MVPP2_RXQ_DISABLE_MASK;
4092 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4093 }
4094 }
4095
4096 static void mvpp2_ingress_disable(struct mvpp2_port *port)
4097 {
4098 u32 val;
4099 int lrxq, queue;
4100
4101 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4102 queue = port->rxqs[lrxq]->id;
4103 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4104 val |= MVPP2_RXQ_DISABLE_MASK;
4105 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4106 }
4107 }
4108
4109 /* Enable transmit via physical egress queue
4110 * - HW starts taking descriptors from DRAM
4111 */
4112 static void mvpp2_egress_enable(struct mvpp2_port *port)
4113 {
4114 u32 qmap;
4115 int queue;
4116 int tx_port_num = mvpp2_egress_port(port);
4117
4118 /* Enable all initialized Tx queues. */
4119 qmap = 0;
4120 for (queue = 0; queue < txq_number; queue++) {
4121 struct mvpp2_tx_queue *txq = port->txqs[queue];
4122
4123 if (txq->descs != NULL)
4124 qmap |= (1 << queue);
4125 }
4126
4127 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4128 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4129 }
4130
4131 /* Disable transmit via physical egress queue
4132 * - HW stops taking descriptors from DRAM
4133 */
4134 static void mvpp2_egress_disable(struct mvpp2_port *port)
4135 {
4136 u32 reg_data;
4137 int delay;
4138 int tx_port_num = mvpp2_egress_port(port);
4139
4140 /* Issue stop command for active channels only */
4141 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4142 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4143 MVPP2_TXP_SCHED_ENQ_MASK;
4144 if (reg_data != 0)
4145 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4146 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4147
4148 /* Wait for all Tx activity to terminate. */
4149 delay = 0;
4150 do {
4151 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4152 netdev_warn(port->dev,
4153 "Tx stop timed out, status=0x%08x\n",
4154 reg_data);
4155 break;
4156 }
4157 mdelay(1);
4158 delay++;
4159
4160 /* Check the port TX Command register to verify that all
4161 * Tx queues are stopped
4162 */
4163 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4164 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4165 }
4166
4167 /* Rx descriptors helper methods */
4168
4169 /* Get number of Rx descriptors occupied by received packets */
4170 static inline int
4171 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4172 {
4173 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4174
4175 return val & MVPP2_RXQ_OCCUPIED_MASK;
4176 }
4177
4178 /* Update Rx queue status with the number of occupied and available
4179 * Rx descriptor slots.
4180 */
4181 static inline void
4182 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4183 int used_count, int free_count)
4184 {
4185 /* Decrement the number of used descriptors and increment the
4186 * number of free descriptors.
4187 */
4188 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4189
4190 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4191 }
4192
4193 /* Get pointer to next RX descriptor to be processed by SW */
4194 static inline struct mvpp2_rx_desc *
4195 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4196 {
4197 int rx_desc = rxq->next_desc_to_proc;
4198
4199 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4200 prefetch(rxq->descs + rxq->next_desc_to_proc);
4201 return rxq->descs + rx_desc;
4202 }
4203
4204 /* Set rx queue offset */
4205 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4206 int prxq, int offset)
4207 {
4208 u32 val;
4209
4210 /* Convert offset from bytes to units of 32 bytes */
4211 offset = offset >> 5;
4212
4213 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4214 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4215
4216 /* Offset is written in units of 32 bytes */
4217 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4218 MVPP2_RXQ_PACKET_OFFSET_MASK);
4219
4220 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4221 }
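/*
 * Illustrative example, not part of the driver: the packet offset field
 * takes units of 32 bytes, so the byte offset passed in is divided by 32
 * before being written. For a hypothetical 64-byte headroom:
 *
 *	offset = 64 >> 5;	== 2 (i.e. 2 x 32 bytes)
 */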
4222
4223 /* Obtain BM cookie information from descriptor */
4224 static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
4225 struct mvpp2_rx_desc *rx_desc)
4226 {
4227 int cpu = smp_processor_id();
4228 int pool;
4229
4230 pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
4231 MVPP2_RXD_BM_POOL_ID_MASK) >>
4232 MVPP2_RXD_BM_POOL_ID_OFFS;
4233
4234 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
4235 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
4236 }
4237
4238 /* Tx descriptors helper methods */
4239
4240 /* Get pointer to next Tx descriptor to be processed (send) by HW */
4241 static struct mvpp2_tx_desc *
4242 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4243 {
4244 int tx_desc = txq->next_desc_to_proc;
4245
4246 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4247 return txq->descs + tx_desc;
4248 }
4249
4250 /* Update HW with number of aggregated Tx descriptors to be sent */
4251 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4252 {
4253 /* aggregated access - relevant TXQ number is written in TX desc */
4254 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
4255 }
4256
4258 /* Check if there are enough free descriptors in aggregated txq.
4259 * If not, update the number of occupied descriptors and repeat the check.
4260 */
4261 static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4262 struct mvpp2_tx_queue *aggr_txq, int num)
4263 {
4264 if ((aggr_txq->count + num) > aggr_txq->size) {
4265 /* Update number of occupied aggregated Tx descriptors */
4266 int cpu = smp_processor_id();
4267 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4268
4269 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4270 }
4271
4272 if ((aggr_txq->count + num) > aggr_txq->size)
4273 return -ENOMEM;
4274
4275 return 0;
4276 }
4277
4278 /* Reserved Tx descriptors allocation request */
4279 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4280 struct mvpp2_tx_queue *txq, int num)
4281 {
4282 u32 val;
4283
4284 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
4285 mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
4286
4287 val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
4288
4289 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4290 }
4291
4292 /* Check if there are enough reserved descriptors for transmission.
4293 * If not, request chunk of reserved descriptors and check again.
4294 */
4295 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4296 struct mvpp2_tx_queue *txq,
4297 struct mvpp2_txq_pcpu *txq_pcpu,
4298 int num)
4299 {
4300 int req, cpu, desc_count;
4301
4302 if (txq_pcpu->reserved_num >= num)
4303 return 0;
4304
4305 /* Not enough descriptors reserved! Update the reserved descriptor
4306 * count and check again.
4307 */
4308
4309 desc_count = 0;
4310 /* Compute total of used descriptors */
4311 for_each_present_cpu(cpu) {
4312 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4313
4314 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4315 desc_count += txq_pcpu_aux->count;
4316 desc_count += txq_pcpu_aux->reserved_num;
4317 }
4318
4319 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4320 desc_count += req;
4321
4322 if (desc_count >
4323 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4324 return -ENOMEM;
4325
4326 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4327
4328 /* OK, the descriptor count has been updated: check again. */
4329 if (txq_pcpu->reserved_num < num)
4330 return -ENOMEM;
4331 return 0;
4332 }
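/*
 * Illustrative example, not part of the driver: the check above keeps a
 * headroom of MVPP2_CPU_DESC_CHUNK descriptors per present CPU. Assuming a
 * hypothetical chunk of 64, 4 present CPUs and a 1024-entry Tx queue, a
 * reservation is refused once used + reserved + requested descriptors
 * would exceed:
 *
 *	1024 - (4 * 64) == 768
 */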
4333
4334 /* Release the last allocated Tx descriptor. Useful to handle DMA
4335 * mapping failures in the Tx path.
4336 */
4337 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4338 {
4339 if (txq->next_desc_to_proc == 0)
4340 txq->next_desc_to_proc = txq->last_desc - 1;
4341 else
4342 txq->next_desc_to_proc--;
4343 }
4344
4345 /* Set Tx descriptors fields relevant for CSUM calculation */
4346 static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4347 int ip_hdr_len, int l4_proto)
4348 {
4349 u32 command;
4350
4351 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4352 * G_L4_chk and L4_type are required only for checksum calculation
4353 */
4354 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4355 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4356 command |= MVPP2_TXD_IP_CSUM_DISABLE;
4357
4358 if (l3_proto == swab16(ETH_P_IP)) {
4359 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
4360 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
4361 } else {
4362 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
4363 }
4364
4365 if (l4_proto == IPPROTO_TCP) {
4366 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
4367 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4368 } else if (l4_proto == IPPROTO_UDP) {
4369 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
4370 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4371 } else {
4372 command |= MVPP2_TXD_L4_CSUM_NOT;
4373 }
4374
4375 return command;
4376 }
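/*
 * Illustrative example, not part of the driver: for an IPv4/TCP frame the
 * caller (mvpp2_skb_tx_csum below) passes the L3 offset in bytes and
 * ip4h->ihl, i.e. the IP header length in 32-bit words (5 for a 20-byte
 * header), so a typical call would be:
 *
 *	command = mvpp2_txq_desc_csum(14, swab16(ETH_P_IP), 5, IPPROTO_TCP);
 *
 * which enables IPv4 and TCP checksum generation in the Tx descriptor.
 */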
4377
4378 /* Get number of sent descriptors and decrement counter.
4379 * The number of sent descriptors is returned.
4380 * Per-CPU access
4381 */
4382 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4383 struct mvpp2_tx_queue *txq)
4384 {
4385 u32 val;
4386
4387 /* Reading status reg resets transmitted descriptor counter */
4388 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
4389
4390 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4391 MVPP2_TRANSMITTED_COUNT_OFFSET;
4392 }
4393
4394 static void mvpp2_txq_sent_counter_clear(void *arg)
4395 {
4396 struct mvpp2_port *port = arg;
4397 int queue;
4398
4399 for (queue = 0; queue < txq_number; queue++) {
4400 int id = port->txqs[queue]->id;
4401
4402 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
4403 }
4404 }
4405
4406 /* Set max sizes for Tx queues */
4407 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4408 {
4409 u32 val, size, mtu;
4410 int txq, tx_port_num;
4411
4412 mtu = port->pkt_size * 8;
4413 if (mtu > MVPP2_TXP_MTU_MAX)
4414 mtu = MVPP2_TXP_MTU_MAX;
4415
4416 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
4417 mtu = 3 * mtu;
4418
4419 /* Indirect access to registers */
4420 tx_port_num = mvpp2_egress_port(port);
4421 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4422
4423 /* Set MTU */
4424 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4425 val &= ~MVPP2_TXP_MTU_MAX;
4426 val |= mtu;
4427 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4428
4429 /* TXP token size and all TXQ token sizes must be larger than the MTU */
4430 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4431 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4432 if (size < mtu) {
4433 size = mtu;
4434 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4435 val |= size;
4436 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4437 }
4438
4439 for (txq = 0; txq < txq_number; txq++) {
4440 val = mvpp2_read(port->priv,
4441 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4442 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4443
4444 if (size < mtu) {
4445 size = mtu;
4446 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4447 val |= size;
4448 mvpp2_write(port->priv,
4449 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4450 val);
4451 }
4452 }
4453 }
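/*
 * Illustrative example, not part of the driver: with the 3x token bucket
 * workaround above, a hypothetical pkt_size of 1568 bytes gives
 *
 *	mtu = 1568 * 8;		== 12544 (assumed below MVPP2_TXP_MTU_MAX)
 *	mtu = 3 * 12544;	== 37632
 *
 * and the TXP and per-TXQ token sizes are then raised to at least that
 * value.
 */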
4454
4455 /* Set the number of packets that will be received before an Rx interrupt
4456 * is generated by HW.
4457 */
4458 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4459 struct mvpp2_rx_queue *rxq)
4460 {
4461 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
4462 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
4463
4464 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4465 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG,
4466 rxq->pkts_coal);
4467 }
4468
4469 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
4470 {
4471 u64 tmp = (u64)clk_hz * usec;
4472
4473 do_div(tmp, USEC_PER_SEC);
4474
4475 return tmp > U32_MAX ? U32_MAX : tmp;
4476 }
4477
4478 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
4479 {
4480 u64 tmp = (u64)cycles * USEC_PER_SEC;
4481
4482 do_div(tmp, clk_hz);
4483
4484 return tmp > U32_MAX ? U32_MAX : tmp;
4485 }
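/*
 * Illustrative example, not part of the driver: assuming a hypothetical
 * tclk of 250 MHz, a 100 usec Rx coalescing time converts to
 *
 *	(250000000ULL * 100) / USEC_PER_SEC == 25000 cycles
 *
 * and mvpp2_cycles_to_usec() maps those 25000 cycles back to 100 usec.
 */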
4486
4487 /* Set the time delay in usec before Rx interrupt */
4488 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4489 struct mvpp2_rx_queue *rxq)
4490 {
4491 unsigned long freq = port->priv->tclk;
4492 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4493
4494 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
4495 rxq->time_coal =
4496 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
4497
4498 /* re-evaluate to get actual register value */
4499 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4500 }
4501
4502 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
4503 }
4504
4505 /* Free Tx queue skbuffs */
4506 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4507 struct mvpp2_tx_queue *txq,
4508 struct mvpp2_txq_pcpu *txq_pcpu, int num)
4509 {
4510 int i;
4511
4512 for (i = 0; i < num; i++) {
4513 struct mvpp2_txq_pcpu_buf *tx_buf =
4514 txq_pcpu->buffs + txq_pcpu->txq_get_index;
4515
4516 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
4517 tx_buf->size, DMA_TO_DEVICE);
4518 if (tx_buf->skb)
4519 dev_kfree_skb_any(tx_buf->skb);
4520
4521 mvpp2_txq_inc_get(txq_pcpu);
4522 }
4523 }
4524
4525 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4526 u32 cause)
4527 {
4528 int queue = fls(cause) - 1;
4529
4530 return port->rxqs[queue];
4531 }
4532
4533 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4534 u32 cause)
4535 {
4536 int queue = fls(cause) - 1;
4537
4538 return port->txqs[queue];
4539 }
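/*
 * Illustrative example, not part of the driver: both lookups pick the
 * highest set bit of the cause bitmap. For a hypothetical cause of 0x5
 * (queues 0 and 2 pending):
 *
 *	queue = fls(0x5) - 1;	== 2
 *
 * so queue 2 is handled first; the Tx-done path then clears its bit and
 * loops over the remaining queues.
 */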
4540
4541 /* Handle end of transmission */
4542 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4543 struct mvpp2_txq_pcpu *txq_pcpu)
4544 {
4545 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4546 int tx_done;
4547
4548 if (txq_pcpu->cpu != smp_processor_id())
4549 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
4550
4551 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4552 if (!tx_done)
4553 return;
4554 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4555
4556 txq_pcpu->count -= tx_done;
4557
4558 if (netif_tx_queue_stopped(nq))
4559 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4560 netif_tx_wake_queue(nq);
4561 }
4562
4563 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4564 {
4565 struct mvpp2_tx_queue *txq;
4566 struct mvpp2_txq_pcpu *txq_pcpu;
4567 unsigned int tx_todo = 0;
4568
4569 while (cause) {
4570 txq = mvpp2_get_tx_queue(port, cause);
4571 if (!txq)
4572 break;
4573
4574 txq_pcpu = this_cpu_ptr(txq->pcpu);
4575
4576 if (txq_pcpu->count) {
4577 mvpp2_txq_done(port, txq, txq_pcpu);
4578 tx_todo += txq_pcpu->count;
4579 }
4580
4581 cause &= ~(1 << txq->log_id);
4582 }
4583 return tx_todo;
4584 }
4585
4586 /* Rx/Tx queue initialization/cleanup methods */
4587
4588 /* Allocate and initialize descriptors for aggr TXQ */
4589 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4590 struct mvpp2_tx_queue *aggr_txq,
4591 int desc_num, int cpu,
4592 struct mvpp2 *priv)
4593 {
4594 /* Allocate memory for TX descriptors */
4595 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
4596 desc_num * MVPP2_DESC_ALIGNED_SIZE,
4597 &aggr_txq->descs_dma, GFP_KERNEL);
4598 if (!aggr_txq->descs)
4599 return -ENOMEM;
4600
4601 aggr_txq->last_desc = aggr_txq->size - 1;
4602
4603 /* Aggr TXQ is not reset by HW (no-reset WA): resync the SW index with HW */
4604 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4605 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4606
4607 /* Set Tx descriptors queue starting address */
4608 /* indirect access */
4609 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
4610 aggr_txq->descs_dma);
4611 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4612
4613 return 0;
4614 }
4615
4616 /* Create a specified Rx queue */
4617 static int mvpp2_rxq_init(struct mvpp2_port *port,
4618 struct mvpp2_rx_queue *rxq)
4619 {
4621 rxq->size = port->rx_ring_size;
4622
4623 /* Allocate memory for RX descriptors */
4624 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
4625 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4626 &rxq->descs_dma, GFP_KERNEL);
4627 if (!rxq->descs)
4628 return -ENOMEM;
4629
4630 rxq->last_desc = rxq->size - 1;
4631
4632 /* Zero occupied and non-occupied counters - direct access */
4633 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4634
4635 /* Set Rx descriptors queue starting address - indirect access */
4636 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4637 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_dma);
4638 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4639 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
4640
4641 /* Set Offset */
4642 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4643
4644 /* Set coalescing pkts and time */
4645 mvpp2_rx_pkts_coal_set(port, rxq);
4646 mvpp2_rx_time_coal_set(port, rxq);
4647
4648 /* Add number of descriptors ready for receiving packets */
4649 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4650
4651 return 0;
4652 }
4653
4654 /* Push packets received by the RXQ to BM pool */
4655 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4656 struct mvpp2_rx_queue *rxq)
4657 {
4658 int rx_received, i;
4659
4660 rx_received = mvpp2_rxq_received(port, rxq->id);
4661 if (!rx_received)
4662 return;
4663
4664 for (i = 0; i < rx_received; i++) {
4665 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4666 u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
4667
4668 mvpp2_pool_refill(port, bm,
4669 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
4670 mvpp2_rxdesc_cookie_get(port, rx_desc));
4671 }
4672 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4673 }
4674
4675 /* Cleanup Rx queue */
4676 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4677 struct mvpp2_rx_queue *rxq)
4678 {
4679 mvpp2_rxq_drop_pkts(port, rxq);
4680
4681 if (rxq->descs)
4682 dma_free_coherent(port->dev->dev.parent,
4683 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4684 rxq->descs,
4685 rxq->descs_dma);
4686
4687 rxq->descs = NULL;
4688 rxq->last_desc = 0;
4689 rxq->next_desc_to_proc = 0;
4690 rxq->descs_dma = 0;
4691
4692 /* Clear Rx descriptors queue starting address and size;
4693 * free descriptor number
4694 */
4695 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4696 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4697 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
4698 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
4699 }
4700
4701 /* Create and initialize a Tx queue */
4702 static int mvpp2_txq_init(struct mvpp2_port *port,
4703 struct mvpp2_tx_queue *txq)
4704 {
4705 u32 val;
4706 int cpu, desc, desc_per_txq, tx_port_num;
4707 struct mvpp2_txq_pcpu *txq_pcpu;
4708
4709 txq->size = port->tx_ring_size;
4710
4711 /* Allocate memory for Tx descriptors */
4712 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
4713 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4714 &txq->descs_dma, GFP_KERNEL);
4715 if (!txq->descs)
4716 return -ENOMEM;
4717
4718 txq->last_desc = txq->size - 1;
4719
4720 /* Set Tx descriptors queue starting address - indirect access */
4721 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4722 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
4723 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
4724 MVPP2_TXQ_DESC_SIZE_MASK);
4725 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
4726 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
4727 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
4728 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4729 val &= ~MVPP2_TXQ_PENDING_MASK;
4730 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
4731
4732 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
4733 * for each existing TXQ.
4734 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
4735 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
4736 */
4737 desc_per_txq = 16;
4738 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
4739 (txq->log_id * desc_per_txq);
4740
4741 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
4742 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
4743 MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
4744
4745 /* WRR / EJP configuration - indirect access */
4746 tx_port_num = mvpp2_egress_port(port);
4747 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4748
4749 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
4750 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
4751 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
4752 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
4753 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
4754
4755 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
4756 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
4757 val);
4758
4759 for_each_present_cpu(cpu) {
4760 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4761 txq_pcpu->size = txq->size;
4762 txq_pcpu->buffs = kmalloc(txq_pcpu->size *
4763 sizeof(struct mvpp2_txq_pcpu_buf),
4764 GFP_KERNEL);
4765 if (!txq_pcpu->buffs)
4766 goto error;
4767
4768 txq_pcpu->count = 0;
4769 txq_pcpu->reserved_num = 0;
4770 txq_pcpu->txq_put_index = 0;
4771 txq_pcpu->txq_get_index = 0;
4772 }
4773
4774 return 0;
4775
4776 error:
4777 for_each_present_cpu(cpu) {
4778 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4779 kfree(txq_pcpu->buffs);
4780 }
4781
4782 dma_free_coherent(port->dev->dev.parent,
4783 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4784 txq->descs, txq->descs_dma);
4785
4786 return -ENOMEM;
4787 }
4788
4789 /* Free allocated TXQ resources */
4790 static void mvpp2_txq_deinit(struct mvpp2_port *port,
4791 struct mvpp2_tx_queue *txq)
4792 {
4793 struct mvpp2_txq_pcpu *txq_pcpu;
4794 int cpu;
4795
4796 for_each_present_cpu(cpu) {
4797 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4798 kfree(txq_pcpu->buffs);
4799 }
4800
4801 if (txq->descs)
4802 dma_free_coherent(port->dev->dev.parent,
4803 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4804 txq->descs, txq->descs_dma);
4805
4806 txq->descs = NULL;
4807 txq->last_desc = 0;
4808 txq->next_desc_to_proc = 0;
4809 txq->descs_dma = 0;
4810
4811 /* Set minimum bandwidth for disabled TXQs */
4812 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
4813
4814 /* Set Tx descriptors queue starting address and size */
4815 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4816 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
4817 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
4818 }
4819
4820 /* Cleanup a Tx queue */
4821 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4822 {
4823 struct mvpp2_txq_pcpu *txq_pcpu;
4824 int delay, pending, cpu;
4825 u32 val;
4826
4827 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4828 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4829 val |= MVPP2_TXQ_DRAIN_EN_MASK;
4830 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4831
4832 /* The napi queue has been stopped so wait for all packets
4833 * to be transmitted.
4834 */
4835 delay = 0;
4836 do {
4837 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
4838 netdev_warn(port->dev,
4839 "port %d: cleaning queue %d timed out\n",
4840 port->id, txq->log_id);
4841 break;
4842 }
4843 mdelay(1);
4844 delay++;
4845
4846 pending = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG) &
4847 MVPP2_TXQ_PENDING_MASK;
4848 } while (pending);
4849
4850 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4851 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4852
4853 for_each_present_cpu(cpu) {
4854 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4855
4856 /* Release all packets */
4857 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
4858
4859 /* Reset queue */
4860 txq_pcpu->count = 0;
4861 txq_pcpu->txq_put_index = 0;
4862 txq_pcpu->txq_get_index = 0;
4863 }
4864 }
4865
4866 /* Cleanup all Tx queues */
4867 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
4868 {
4869 struct mvpp2_tx_queue *txq;
4870 int queue;
4871 u32 val;
4872
4873 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
4874
4875 /* Reset Tx ports and delete Tx queues */
4876 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
4877 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4878
4879 for (queue = 0; queue < txq_number; queue++) {
4880 txq = port->txqs[queue];
4881 mvpp2_txq_clean(port, txq);
4882 mvpp2_txq_deinit(port, txq);
4883 }
4884
4885 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4886
4887 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
4888 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4889 }
4890
4891 /* Cleanup all Rx queues */
4892 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
4893 {
4894 int queue;
4895
4896 for (queue = 0; queue < rxq_number; queue++)
4897 mvpp2_rxq_deinit(port, port->rxqs[queue]);
4898 }
4899
4900 /* Init all Rx queues for port */
4901 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
4902 {
4903 int queue, err;
4904
4905 for (queue = 0; queue < rxq_number; queue++) {
4906 err = mvpp2_rxq_init(port, port->rxqs[queue]);
4907 if (err)
4908 goto err_cleanup;
4909 }
4910 return 0;
4911
4912 err_cleanup:
4913 mvpp2_cleanup_rxqs(port);
4914 return err;
4915 }
4916
4917 /* Init all tx queues for port */
4918 static int mvpp2_setup_txqs(struct mvpp2_port *port)
4919 {
4920 struct mvpp2_tx_queue *txq;
4921 int queue, err;
4922
4923 for (queue = 0; queue < txq_number; queue++) {
4924 txq = port->txqs[queue];
4925 err = mvpp2_txq_init(port, txq);
4926 if (err)
4927 goto err_cleanup;
4928 }
4929
4930 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4931 return 0;
4932
4933 err_cleanup:
4934 mvpp2_cleanup_txqs(port);
4935 return err;
4936 }
4937
4938 /* The callback for per-port interrupt */
4939 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
4940 {
4941 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
4942
4943 mvpp2_interrupts_disable(port);
4944
4945 napi_schedule(&port->napi);
4946
4947 return IRQ_HANDLED;
4948 }
4949
4950 /* Adjust link */
4951 static void mvpp2_link_event(struct net_device *dev)
4952 {
4953 struct mvpp2_port *port = netdev_priv(dev);
4954 struct phy_device *phydev = dev->phydev;
4955 int status_change = 0;
4956 u32 val;
4957
4958 if (phydev->link) {
4959 if ((port->speed != phydev->speed) ||
4960 (port->duplex != phydev->duplex)) {
4961 u32 val;
4962
4963 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4964 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
4965 MVPP2_GMAC_CONFIG_GMII_SPEED |
4966 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
4967 MVPP2_GMAC_AN_SPEED_EN |
4968 MVPP2_GMAC_AN_DUPLEX_EN);
4969
4970 if (phydev->duplex)
4971 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4972
4973 if (phydev->speed == SPEED_1000)
4974 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
4975 else if (phydev->speed == SPEED_100)
4976 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
4977
4978 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4979
4980 port->duplex = phydev->duplex;
4981 port->speed = phydev->speed;
4982 }
4983 }
4984
4985 if (phydev->link != port->link) {
4986 if (!phydev->link) {
4987 port->duplex = -1;
4988 port->speed = 0;
4989 }
4990
4991 port->link = phydev->link;
4992 status_change = 1;
4993 }
4994
4995 if (status_change) {
4996 if (phydev->link) {
4997 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4998 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
4999 MVPP2_GMAC_FORCE_LINK_DOWN);
5000 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5001 mvpp2_egress_enable(port);
5002 mvpp2_ingress_enable(port);
5003 } else {
5004 mvpp2_ingress_disable(port);
5005 mvpp2_egress_disable(port);
5006 }
5007 phy_print_status(phydev);
5008 }
5009 }
5010
5011 static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
5012 {
5013 ktime_t interval;
5014
5015 if (!port_pcpu->timer_scheduled) {
5016 port_pcpu->timer_scheduled = true;
5017 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
5018 hrtimer_start(&port_pcpu->tx_done_timer, interval,
5019 HRTIMER_MODE_REL_PINNED);
5020 }
5021 }
5022
5023 static void mvpp2_tx_proc_cb(unsigned long data)
5024 {
5025 struct net_device *dev = (struct net_device *)data;
5026 struct mvpp2_port *port = netdev_priv(dev);
5027 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5028 unsigned int tx_todo, cause;
5029
5030 if (!netif_running(dev))
5031 return;
5032 port_pcpu->timer_scheduled = false;
5033
5034 /* Process all the Tx queues */
5035 cause = (1 << txq_number) - 1;
5036 tx_todo = mvpp2_tx_done(port, cause);
5037
5038 /* Set the timer in case not all the packets were processed */
5039 if (tx_todo)
5040 mvpp2_timer_set(port_pcpu);
5041 }
5042
5043 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
5044 {
5045 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
5046 struct mvpp2_port_pcpu,
5047 tx_done_timer);
5048
5049 tasklet_schedule(&port_pcpu->tx_done_tasklet);
5050
5051 return HRTIMER_NORESTART;
5052 }
5053
5054 /* Main RX/TX processing routines */
5055
5056 /* Display more error info */
5057 static void mvpp2_rx_error(struct mvpp2_port *port,
5058 struct mvpp2_rx_desc *rx_desc)
5059 {
5060 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5061 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
5062
5063 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
5064 case MVPP2_RXD_ERR_CRC:
5065 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
5066 status, sz);
5067 break;
5068 case MVPP2_RXD_ERR_OVERRUN:
5069 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
5070 status, sz);
5071 break;
5072 case MVPP2_RXD_ERR_RESOURCE:
5073 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
5074 status, sz);
5075 break;
5076 }
5077 }
5078
5079 /* Handle RX checksum offload */
5080 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
5081 struct sk_buff *skb)
5082 {
5083 if (((status & MVPP2_RXD_L3_IP4) &&
5084 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
5085 (status & MVPP2_RXD_L3_IP6))
5086 if (((status & MVPP2_RXD_L4_UDP) ||
5087 (status & MVPP2_RXD_L4_TCP)) &&
5088 (status & MVPP2_RXD_L4_CSUM_OK)) {
5089 skb->csum = 0;
5090 skb->ip_summed = CHECKSUM_UNNECESSARY;
5091 return;
5092 }
5093
5094 skb->ip_summed = CHECKSUM_NONE;
5095 }
5096
5097 /* Allocate a new buffer and add it to the BM pool */
5098 static int mvpp2_rx_refill(struct mvpp2_port *port,
5099 struct mvpp2_bm_pool *bm_pool, u32 bm)
5100 {
5101 dma_addr_t dma_addr;
5102 phys_addr_t phys_addr;
5103 void *buf;
5104
5105 /* No buffer recycling is done here, so allocate a new buffer */
5106 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
5107 GFP_ATOMIC);
5108 if (!buf)
5109 return -ENOMEM;
5110
5111 mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
5112
5113 return 0;
5114 }
5115
5116 /* Handle tx checksum */
5117 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5118 {
5119 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5120 int ip_hdr_len = 0;
5121 u8 l4_proto;
5122
5123 if (skb->protocol == htons(ETH_P_IP)) {
5124 struct iphdr *ip4h = ip_hdr(skb);
5125
5126 /* Calculate IPv4 checksum and L4 checksum */
5127 ip_hdr_len = ip4h->ihl;
5128 l4_proto = ip4h->protocol;
5129 } else if (skb->protocol == htons(ETH_P_IPV6)) {
5130 struct ipv6hdr *ip6h = ipv6_hdr(skb);
5131
5132 /* Read l4_protocol from one of IPv6 extra headers */
5133 if (skb_network_header_len(skb) > 0)
5134 ip_hdr_len = (skb_network_header_len(skb) >> 2);
5135 l4_proto = ip6h->nexthdr;
5136 } else {
5137 return MVPP2_TXD_L4_CSUM_NOT;
5138 }
5139
5140 return mvpp2_txq_desc_csum(skb_network_offset(skb),
5141 skb->protocol, ip_hdr_len, l4_proto);
5142 }
5143
5144 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5145 }
5146
5147 /* Main rx processing */
5148 static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5149 struct mvpp2_rx_queue *rxq)
5150 {
5151 struct net_device *dev = port->dev;
5152 int rx_received;
5153 int rx_done = 0;
5154 u32 rcvd_pkts = 0;
5155 u32 rcvd_bytes = 0;
5156
5157 /* Get number of received packets and clamp the to-do */
5158 rx_received = mvpp2_rxq_received(port, rxq->id);
5159 if (rx_todo > rx_received)
5160 rx_todo = rx_received;
5161
5162 while (rx_done < rx_todo) {
5163 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5164 struct mvpp2_bm_pool *bm_pool;
5165 struct sk_buff *skb;
5166 unsigned int frag_size;
5167 dma_addr_t dma_addr;
5168 phys_addr_t phys_addr;
5169 u32 bm, rx_status;
5170 int pool, rx_bytes, err;
5171 void *data;
5172
5173 rx_done++;
5174 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
5175 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
5176 rx_bytes -= MVPP2_MH_SIZE;
5177 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
5178 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
5179 data = (void *)phys_to_virt(phys_addr);
5180
5181 bm = mvpp2_bm_cookie_build(port, rx_desc);
5182 pool = mvpp2_bm_cookie_pool_get(bm);
5183 bm_pool = &port->priv->bm_pools[pool];
5184
5185 /* In case of an error, release the requested buffer pointer
5186 * to the Buffer Manager. This request process is controlled
5187 * by the hardware, and the information about the buffer is
5188 * provided by the RX descriptor.
5189 */
5190 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5191 err_drop_frame:
5192 dev->stats.rx_errors++;
5193 mvpp2_rx_error(port, rx_desc);
5194 /* Return the buffer to the pool */
5195 mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
5196 continue;
5197 }
5198
5199 if (bm_pool->frag_size > PAGE_SIZE)
5200 frag_size = 0;
5201 else
5202 frag_size = bm_pool->frag_size;
5203
5204 skb = build_skb(data, frag_size);
5205 if (!skb) {
5206 netdev_warn(port->dev, "skb build failed\n");
5207 goto err_drop_frame;
5208 }
5209
5210 err = mvpp2_rx_refill(port, bm_pool, bm);
5211 if (err) {
5212 netdev_err(port->dev, "failed to refill BM pools\n");
5213 goto err_drop_frame;
5214 }
5215
5216 dma_unmap_single(dev->dev.parent, dma_addr,
5217 bm_pool->buf_size, DMA_FROM_DEVICE);
5218
5219 rcvd_pkts++;
5220 rcvd_bytes += rx_bytes;
5221
5222 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
5223 skb_put(skb, rx_bytes);
5224 skb->protocol = eth_type_trans(skb, dev);
5225 mvpp2_rx_csum(port, rx_status, skb);
5226
5227 napi_gro_receive(&port->napi, skb);
5228 }
5229
5230 if (rcvd_pkts) {
5231 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5232
5233 u64_stats_update_begin(&stats->syncp);
5234 stats->rx_packets += rcvd_pkts;
5235 stats->rx_bytes += rcvd_bytes;
5236 u64_stats_update_end(&stats->syncp);
5237 }
5238
5239 /* Update Rx queue management counters */
5240 wmb();
5241 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
5242
5243 return rx_todo;
5244 }
5245
5246 static inline void
5247 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
5248 struct mvpp2_tx_desc *desc)
5249 {
5250 dma_addr_t buf_dma_addr =
5251 mvpp2_txdesc_dma_addr_get(port, desc);
5252 size_t buf_sz =
5253 mvpp2_txdesc_size_get(port, desc);
5254 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
5255 buf_sz, DMA_TO_DEVICE);
5256 mvpp2_txq_desc_put(txq);
5257 }
5258
5259 /* Handle tx fragmentation processing */
5260 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5261 struct mvpp2_tx_queue *aggr_txq,
5262 struct mvpp2_tx_queue *txq)
5263 {
5264 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5265 struct mvpp2_tx_desc *tx_desc;
5266 int i;
5267 dma_addr_t buf_dma_addr;
5268
5269 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5270 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5271 void *addr = page_address(frag->page.p) + frag->page_offset;
5272
5273 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5274 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5275 mvpp2_txdesc_size_set(port, tx_desc, frag->size);
5276
5277 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
5278 frag->size,
5279 DMA_TO_DEVICE);
5280 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
5281 mvpp2_txq_desc_put(txq);
5282 goto error;
5283 }
5284
5285 mvpp2_txdesc_offset_set(port, tx_desc,
5286 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5287 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5288 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
5289
5290 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5291 /* Last descriptor */
5292 mvpp2_txdesc_cmd_set(port, tx_desc,
5293 MVPP2_TXD_L_DESC);
5294 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
5295 } else {
5296 /* Descriptor in the middle: Not First, Not Last */
5297 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
5298 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
5299 }
5300 }
5301
5302 return 0;
5303
5304 error:
5305 /* Release all descriptors that were used to map fragments of
5306 * this packet, as well as the corresponding DMA mappings
5307 */
5308 for (i = i - 1; i >= 0; i--) {
5309 tx_desc = txq->descs + i;
5310 tx_desc_unmap_put(port, txq, tx_desc);
5311 }
5312
5313 return -ENOMEM;
5314 }
5315
5316 /* Main tx processing */
5317 static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5318 {
5319 struct mvpp2_port *port = netdev_priv(dev);
5320 struct mvpp2_tx_queue *txq, *aggr_txq;
5321 struct mvpp2_txq_pcpu *txq_pcpu;
5322 struct mvpp2_tx_desc *tx_desc;
5323 dma_addr_t buf_dma_addr;
5324 int frags = 0;
5325 u16 txq_id;
5326 u32 tx_cmd;
5327
5328 txq_id = skb_get_queue_mapping(skb);
5329 txq = port->txqs[txq_id];
5330 txq_pcpu = this_cpu_ptr(txq->pcpu);
5331 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5332
5333 frags = skb_shinfo(skb)->nr_frags + 1;
5334
5335 /* Check number of available descriptors */
5336 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5337 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5338 txq_pcpu, frags)) {
5339 frags = 0;
5340 goto out;
5341 }
5342
5343 /* Get a descriptor for the first part of the packet */
5344 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5345 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5346 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
5347
5348 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
5349 skb_headlen(skb), DMA_TO_DEVICE);
5350 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
5351 mvpp2_txq_desc_put(txq);
5352 frags = 0;
5353 goto out;
5354 }
5355
5356 mvpp2_txdesc_offset_set(port, tx_desc,
5357 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5358 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5359 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
5360
5361 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5362
5363 if (frags == 1) {
5364 /* First and Last descriptor */
5365 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5366 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5367 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
5368 } else {
5369 /* First but not Last */
5370 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5371 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5372 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
5373
5374 /* Continue with other skb fragments */
5375 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
5376 tx_desc_unmap_put(port, txq, tx_desc);
5377 frags = 0;
5378 goto out;
5379 }
5380 }
5381
5382 txq_pcpu->reserved_num -= frags;
5383 txq_pcpu->count += frags;
5384 aggr_txq->count += frags;
5385
5386 /* Enable transmit */
5387 wmb();
5388 mvpp2_aggr_txq_pend_desc_add(port, frags);
5389
5390 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5391 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5392
5393 netif_tx_stop_queue(nq);
5394 }
5395 out:
5396 if (frags > 0) {
5397 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5398
5399 u64_stats_update_begin(&stats->syncp);
5400 stats->tx_packets++;
5401 stats->tx_bytes += skb->len;
5402 u64_stats_update_end(&stats->syncp);
5403 } else {
5404 dev->stats.tx_dropped++;
5405 dev_kfree_skb_any(skb);
5406 }
5407
5408 /* Finalize TX processing */
5409 if (txq_pcpu->count >= txq->done_pkts_coal)
5410 mvpp2_txq_done(port, txq, txq_pcpu);
5411
5412 /* Set the timer in case not all frags were processed */
5413 if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5414 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5415
5416 mvpp2_timer_set(port_pcpu);
5417 }
5418
5419 return NETDEV_TX_OK;
5420 }
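/* Illustrative sketch (not part of the driver): for a hypothetical skb with
 * a linear head and two page fragments (frags == 3), the descriptors built
 * by mvpp2_tx() and mvpp2_tx_frag_process() end up as:
 *
 *   desc 0 (skb->data) -> tx_cmd | MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE
 *   desc 1 (frag 0)    -> 0 (neither first nor last)
 *   desc 2 (frag 1)    -> MVPP2_TXD_L_DESC, with the skb pointer stored so
 *                         tx-done processing can free it
 *
 * A packet that fits entirely in the linear area (frags == 1) uses a single
 * descriptor carrying both MVPP2_TXD_F_DESC and MVPP2_TXD_L_DESC. In every
 * case the DMA address is written as an aligned base
 * (buf_dma_addr & ~MVPP2_TX_DESC_ALIGN) plus a byte offset
 * (buf_dma_addr & MVPP2_TX_DESC_ALIGN).
 */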
5421
5422 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5423 {
5424 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5425 netdev_err(dev, "FCS error\n");
5426 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5427 netdev_err(dev, "rx fifo overrun error\n");
5428 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5429 netdev_err(dev, "tx fifo underrun error\n");
5430 }
5431
5432 static int mvpp2_poll(struct napi_struct *napi, int budget)
5433 {
5434 u32 cause_rx_tx, cause_rx, cause_misc;
5435 int rx_done = 0;
5436 struct mvpp2_port *port = netdev_priv(napi->dev);
5437
5438 /* Rx/Tx cause register
5439 *
5440 * Bits 0-15: each bit indicates received packets on the Rx queue
5441 * (bit 0 is for Rx queue 0).
5442 *
5443 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5444 * (bit 16 is for Tx queue 0).
5445 *
5446 * Each CPU has its own Rx/Tx cause register
5447 */
5448 cause_rx_tx = mvpp2_read(port->priv,
5449 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5450 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5451 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5452
5453 if (cause_misc) {
5454 mvpp2_cause_error(port->dev, cause_misc);
5455
5456 /* Clear the cause register */
5457 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
5458 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5459 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5460 }
5461
5462 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5463
5464 /* Process RX packets */
5465 cause_rx |= port->pending_cause_rx;
5466 while (cause_rx && budget > 0) {
5467 int count;
5468 struct mvpp2_rx_queue *rxq;
5469
5470 rxq = mvpp2_get_rx_queue(port, cause_rx);
5471 if (!rxq)
5472 break;
5473
5474 count = mvpp2_rx(port, budget, rxq);
5475 rx_done += count;
5476 budget -= count;
5477 if (budget > 0) {
5478 /* Clear the bit associated to this Rx queue
5479 * so that next iteration will continue from
5480 * the next Rx queue.
5481 */
5482 cause_rx &= ~(1 << rxq->logic_rxq);
5483 }
5484 }
5485
5486 if (budget > 0) {
5487 cause_rx = 0;
5488 napi_complete_done(napi, rx_done);
5489
5490 mvpp2_interrupts_enable(port);
5491 }
5492 port->pending_cause_rx = cause_rx;
5493 return rx_done;
5494 }
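/* Illustrative sketch (hypothetical value, not part of the driver): with
 * cause_rx == 0b0101, Rx queues 0 and 2 have pending packets. Each loop
 * iteration above services one queue; while budget remains, the queue's bit
 * is cleared so the next iteration moves on to queue 2. If the budget runs
 * out first, the unhandled bits are kept in port->pending_cause_rx and NAPI
 * is not completed, so the next poll resumes from the leftover queues in
 * addition to whatever the cause register reports.
 */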
5495
5496 /* Set hw internals when starting port */
5497 static void mvpp2_start_dev(struct mvpp2_port *port)
5498 {
5499 struct net_device *ndev = port->dev;
5500
5501 mvpp2_gmac_max_rx_size_set(port);
5502 mvpp2_txp_max_tx_size_set(port);
5503
5504 napi_enable(&port->napi);
5505
5506 /* Enable interrupts on all CPUs */
5507 mvpp2_interrupts_enable(port);
5508
5509 mvpp2_port_enable(port);
5510 phy_start(ndev->phydev);
5511 netif_tx_start_all_queues(port->dev);
5512 }
5513
5514 /* Set hw internals when stopping port */
5515 static void mvpp2_stop_dev(struct mvpp2_port *port)
5516 {
5517 struct net_device *ndev = port->dev;
5518
5519 /* Stop new packets from arriving to RXQs */
5520 mvpp2_ingress_disable(port);
5521
5522 mdelay(10);
5523
5524 /* Disable interrupts on all CPUs */
5525 mvpp2_interrupts_disable(port);
5526
5527 napi_disable(&port->napi);
5528
5529 netif_carrier_off(port->dev);
5530 netif_tx_stop_all_queues(port->dev);
5531
5532 mvpp2_egress_disable(port);
5533 mvpp2_port_disable(port);
5534 phy_stop(ndev->phydev);
5535 }
5536
5537 static int mvpp2_check_ringparam_valid(struct net_device *dev,
5538 struct ethtool_ringparam *ring)
5539 {
5540 u16 new_rx_pending = ring->rx_pending;
5541 u16 new_tx_pending = ring->tx_pending;
5542
5543 if (ring->rx_pending == 0 || ring->tx_pending == 0)
5544 return -EINVAL;
5545
5546 if (ring->rx_pending > MVPP2_MAX_RXD)
5547 new_rx_pending = MVPP2_MAX_RXD;
5548 else if (!IS_ALIGNED(ring->rx_pending, 16))
5549 new_rx_pending = ALIGN(ring->rx_pending, 16);
5550
5551 if (ring->tx_pending > MVPP2_MAX_TXD)
5552 new_tx_pending = MVPP2_MAX_TXD;
5553 else if (!IS_ALIGNED(ring->tx_pending, 32))
5554 new_tx_pending = ALIGN(ring->tx_pending, 32);
5555
5556 if (ring->rx_pending != new_rx_pending) {
5557 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
5558 ring->rx_pending, new_rx_pending);
5559 ring->rx_pending = new_rx_pending;
5560 }
5561
5562 if (ring->tx_pending != new_tx_pending) {
5563 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
5564 ring->tx_pending, new_tx_pending);
5565 ring->tx_pending = new_tx_pending;
5566 }
5567
5568 return 0;
5569 }
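/* Worked example (hypothetical request): rx_pending = 100 and
 * tx_pending = 100 are rounded up to 112 (multiple of 16) and 128
 * (multiple of 32), and the adjusted values are reported back through the
 * ethtool_ringparam structure. Requests above MVPP2_MAX_RXD/MVPP2_MAX_TXD
 * are clamped to those maxima, and a zero-sized ring is rejected with
 * -EINVAL.
 */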
5570
5571 static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
5572 {
5573 u32 mac_addr_l, mac_addr_m, mac_addr_h;
5574
5575 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5576 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
5577 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
5578 addr[0] = (mac_addr_h >> 24) & 0xFF;
5579 addr[1] = (mac_addr_h >> 16) & 0xFF;
5580 addr[2] = (mac_addr_h >> 8) & 0xFF;
5581 addr[3] = mac_addr_h & 0xFF;
5582 addr[4] = mac_addr_m & 0xFF;
5583 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
5584 }
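/* Illustrative sketch (hypothetical register contents): if the high and
 * middle registers read back 0x00504302 and 0x000000ab, and the byte at
 * MVPP2_GMAC_SA_LOW_OFFS of the GMAC control register is 0xcd, the
 * reconstructed address is 00:50:43:02:ab:cd - the high register provides
 * the four most significant bytes, the middle register the fifth byte and
 * the GMAC register the last one.
 */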
5585
5586 static int mvpp2_phy_connect(struct mvpp2_port *port)
5587 {
5588 struct phy_device *phy_dev;
5589
5590 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
5591 port->phy_interface);
5592 if (!phy_dev) {
5593 netdev_err(port->dev, "cannot connect to phy\n");
5594 return -ENODEV;
5595 }
5596 phy_dev->supported &= PHY_GBIT_FEATURES;
5597 phy_dev->advertising = phy_dev->supported;
5598
5599 port->link = 0;
5600 port->duplex = 0;
5601 port->speed = 0;
5602
5603 return 0;
5604 }
5605
5606 static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5607 {
5608 struct net_device *ndev = port->dev;
5609
5610 phy_disconnect(ndev->phydev);
5611 }
5612
5613 static int mvpp2_open(struct net_device *dev)
5614 {
5615 struct mvpp2_port *port = netdev_priv(dev);
5616 unsigned char mac_bcast[ETH_ALEN] = {
5617 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5618 int err;
5619
5620 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
5621 if (err) {
5622 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
5623 return err;
5624 }
5625 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
5626 dev->dev_addr, true);
5627 if (err) {
5628 netdev_err(dev, "mvpp2_prs_mac_da_accept own MAC failed\n");
5629 return err;
5630 }
5631 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
5632 if (err) {
5633 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
5634 return err;
5635 }
5636 err = mvpp2_prs_def_flow(port);
5637 if (err) {
5638 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
5639 return err;
5640 }
5641
5642 /* Allocate the Rx/Tx queues */
5643 err = mvpp2_setup_rxqs(port);
5644 if (err) {
5645 netdev_err(port->dev, "cannot allocate Rx queues\n");
5646 return err;
5647 }
5648
5649 err = mvpp2_setup_txqs(port);
5650 if (err) {
5651 netdev_err(port->dev, "cannot allocate Tx queues\n");
5652 goto err_cleanup_rxqs;
5653 }
5654
5655 err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
5656 if (err) {
5657 netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
5658 goto err_cleanup_txqs;
5659 }
5660
5661 /* The link is down by default */
5662 netif_carrier_off(port->dev);
5663
5664 err = mvpp2_phy_connect(port);
5665 if (err < 0)
5666 goto err_free_irq;
5667
5668 /* Unmask interrupts on all CPUs */
5669 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
5670
5671 mvpp2_start_dev(port);
5672
5673 return 0;
5674
5675 err_free_irq:
5676 free_irq(port->irq, port);
5677 err_cleanup_txqs:
5678 mvpp2_cleanup_txqs(port);
5679 err_cleanup_rxqs:
5680 mvpp2_cleanup_rxqs(port);
5681 return err;
5682 }
5683
5684 static int mvpp2_stop(struct net_device *dev)
5685 {
5686 struct mvpp2_port *port = netdev_priv(dev);
5687 struct mvpp2_port_pcpu *port_pcpu;
5688 int cpu;
5689
5690 mvpp2_stop_dev(port);
5691 mvpp2_phy_disconnect(port);
5692
5693 /* Mask interrupts on all CPUs */
5694 on_each_cpu(mvpp2_interrupts_mask, port, 1);
5695
5696 free_irq(port->irq, port);
5697 for_each_present_cpu(cpu) {
5698 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
5699
5700 hrtimer_cancel(&port_pcpu->tx_done_timer);
5701 port_pcpu->timer_scheduled = false;
5702 tasklet_kill(&port_pcpu->tx_done_tasklet);
5703 }
5704 mvpp2_cleanup_rxqs(port);
5705 mvpp2_cleanup_txqs(port);
5706
5707 return 0;
5708 }
5709
5710 static void mvpp2_set_rx_mode(struct net_device *dev)
5711 {
5712 struct mvpp2_port *port = netdev_priv(dev);
5713 struct mvpp2 *priv = port->priv;
5714 struct netdev_hw_addr *ha;
5715 int id = port->id;
5716 bool allmulti = dev->flags & IFF_ALLMULTI;
5717
5718 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
5719 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
5720 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
5721
5722 /* Remove all port->id's mcast entries */
5723 mvpp2_prs_mcast_del_all(priv, id);
5724
5725 if (!allmulti && !netdev_mc_empty(dev)) {
5726 netdev_for_each_mc_addr(ha, dev)
5727 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
5728 }
5729 }
5730
5731 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
5732 {
5733 struct mvpp2_port *port = netdev_priv(dev);
5734 const struct sockaddr *addr = p;
5735 int err;
5736
5737 if (!is_valid_ether_addr(addr->sa_data)) {
5738 err = -EADDRNOTAVAIL;
5739 goto error;
5740 }
5741
5742 if (!netif_running(dev)) {
5743 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5744 if (!err)
5745 return 0;
5746 /* Reconfigure parser to accept the original MAC address */
5747 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5748 if (err)
5749 goto error;
5750 }
5751
5752 mvpp2_stop_dev(port);
5753
5754 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5755 if (!err)
5756 goto out_start;
5757
5758 /* Reconfigure parser to accept the original MAC address */
5759 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5760 if (err)
5761 goto error;
5762 out_start:
5763 mvpp2_start_dev(port);
5764 mvpp2_egress_enable(port);
5765 mvpp2_ingress_enable(port);
5766 return 0;
5767
5768 error:
5769 netdev_err(dev, "failed to change MAC address\n");
5770 return err;
5771 }
5772
5773 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
5774 {
5775 struct mvpp2_port *port = netdev_priv(dev);
5776 int err;
5777
5778 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
5779 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
5780 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5781 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
5782 }
5783
5784 if (!netif_running(dev)) {
5785 err = mvpp2_bm_update_mtu(dev, mtu);
5786 if (!err) {
5787 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5788 return 0;
5789 }
5790
5791 /* Reconfigure BM to the original MTU */
5792 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5793 if (err)
5794 goto error;
5795 }
5796
5797 mvpp2_stop_dev(port);
5798
5799 err = mvpp2_bm_update_mtu(dev, mtu);
5800 if (!err) {
5801 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5802 goto out_start;
5803 }
5804
5805 /* Reconfigure BM to the original MTU */
5806 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5807 if (err)
5808 goto error;
5809
5810 out_start:
5811 mvpp2_start_dev(port);
5812 mvpp2_egress_enable(port);
5813 mvpp2_ingress_enable(port);
5814
5815 return 0;
5816
5817 error:
5818 netdev_err(dev, "failed to change MTU\n");
5819 return err;
5820 }
5821
5822 static void
5823 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5824 {
5825 struct mvpp2_port *port = netdev_priv(dev);
5826 unsigned int start;
5827 int cpu;
5828
5829 for_each_possible_cpu(cpu) {
5830 struct mvpp2_pcpu_stats *cpu_stats;
5831 u64 rx_packets;
5832 u64 rx_bytes;
5833 u64 tx_packets;
5834 u64 tx_bytes;
5835
5836 cpu_stats = per_cpu_ptr(port->stats, cpu);
5837 do {
5838 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
5839 rx_packets = cpu_stats->rx_packets;
5840 rx_bytes = cpu_stats->rx_bytes;
5841 tx_packets = cpu_stats->tx_packets;
5842 tx_bytes = cpu_stats->tx_bytes;
5843 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
5844
5845 stats->rx_packets += rx_packets;
5846 stats->rx_bytes += rx_bytes;
5847 stats->tx_packets += tx_packets;
5848 stats->tx_bytes += tx_bytes;
5849 }
5850
5851 stats->rx_errors = dev->stats.rx_errors;
5852 stats->rx_dropped = dev->stats.rx_dropped;
5853 stats->tx_dropped = dev->stats.tx_dropped;
5854 }
5855
5856 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5857 {
5858 int ret;
5859
5860 if (!dev->phydev)
5861 return -ENOTSUPP;
5862
5863 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
5864 if (!ret)
5865 mvpp2_link_event(dev);
5866
5867 return ret;
5868 }
5869
5870 /* Ethtool methods */
5871
5872 /* Set interrupt coalescing for ethtool */
5873 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5874 struct ethtool_coalesce *c)
5875 {
5876 struct mvpp2_port *port = netdev_priv(dev);
5877 int queue;
5878
5879 for (queue = 0; queue < rxq_number; queue++) {
5880 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5881
5882 rxq->time_coal = c->rx_coalesce_usecs;
5883 rxq->pkts_coal = c->rx_max_coalesced_frames;
5884 mvpp2_rx_pkts_coal_set(port, rxq);
5885 mvpp2_rx_time_coal_set(port, rxq);
5886 }
5887
5888 for (queue = 0; queue < txq_number; queue++) {
5889 struct mvpp2_tx_queue *txq = port->txqs[queue];
5890
5891 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5892 }
5893
5894 return 0;
5895 }
5896
5897 /* Get interrupt coalescing for ethtool */
5898 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
5899 struct ethtool_coalesce *c)
5900 {
5901 struct mvpp2_port *port = netdev_priv(dev);
5902
5903 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
5904 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5905 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5906 return 0;
5907 }
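/* Usage sketch (interface name is a placeholder): these callbacks back the
 * standard ethtool coalescing interface, e.g.
 *
 *   ethtool -C eth0 rx-usecs 32 rx-frames 32 tx-frames 16
 *   ethtool -c eth0
 *
 * rx-usecs and rx-frames map to rxq->time_coal and rxq->pkts_coal on every
 * Rx queue, tx-frames to txq->done_pkts_coal on every Tx queue.
 */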
5908
5909 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5910 struct ethtool_drvinfo *drvinfo)
5911 {
5912 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5913 sizeof(drvinfo->driver));
5914 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5915 sizeof(drvinfo->version));
5916 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
5917 sizeof(drvinfo->bus_info));
5918 }
5919
5920 static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
5921 struct ethtool_ringparam *ring)
5922 {
5923 struct mvpp2_port *port = netdev_priv(dev);
5924
5925 ring->rx_max_pending = MVPP2_MAX_RXD;
5926 ring->tx_max_pending = MVPP2_MAX_TXD;
5927 ring->rx_pending = port->rx_ring_size;
5928 ring->tx_pending = port->tx_ring_size;
5929 }
5930
5931 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
5932 struct ethtool_ringparam *ring)
5933 {
5934 struct mvpp2_port *port = netdev_priv(dev);
5935 u16 prev_rx_ring_size = port->rx_ring_size;
5936 u16 prev_tx_ring_size = port->tx_ring_size;
5937 int err;
5938
5939 err = mvpp2_check_ringparam_valid(dev, ring);
5940 if (err)
5941 return err;
5942
5943 if (!netif_running(dev)) {
5944 port->rx_ring_size = ring->rx_pending;
5945 port->tx_ring_size = ring->tx_pending;
5946 return 0;
5947 }
5948
5949 /* The interface is running, so we have to force a
5950 * reallocation of the queues
5951 */
5952 mvpp2_stop_dev(port);
5953 mvpp2_cleanup_rxqs(port);
5954 mvpp2_cleanup_txqs(port);
5955
5956 port->rx_ring_size = ring->rx_pending;
5957 port->tx_ring_size = ring->tx_pending;
5958
5959 err = mvpp2_setup_rxqs(port);
5960 if (err) {
5961 /* Reallocate Rx queues with the original ring size */
5962 port->rx_ring_size = prev_rx_ring_size;
5963 ring->rx_pending = prev_rx_ring_size;
5964 err = mvpp2_setup_rxqs(port);
5965 if (err)
5966 goto err_out;
5967 }
5968 err = mvpp2_setup_txqs(port);
5969 if (err) {
5970 /* Reallocate Tx queues with the original ring size */
5971 port->tx_ring_size = prev_tx_ring_size;
5972 ring->tx_pending = prev_tx_ring_size;
5973 err = mvpp2_setup_txqs(port);
5974 if (err)
5975 goto err_clean_rxqs;
5976 }
5977
5978 mvpp2_start_dev(port);
5979 mvpp2_egress_enable(port);
5980 mvpp2_ingress_enable(port);
5981
5982 return 0;
5983
5984 err_clean_rxqs:
5985 mvpp2_cleanup_rxqs(port);
5986 err_out:
5987 netdev_err(dev, "failed to change ring parameters\n");
5988 return err;
5989 }
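/* Usage sketch (interface name is a placeholder): ring sizes are changed
 * through the standard ethtool interface, e.g.
 *
 *   ethtool -G eth0 rx 512 tx 1024
 *
 * On a running interface this triggers the stop/reallocate/restart sequence
 * above; if the new rings cannot be allocated, the previous sizes are
 * restored.
 */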
5990
5991 /* Device ops */
5992
5993 static const struct net_device_ops mvpp2_netdev_ops = {
5994 .ndo_open = mvpp2_open,
5995 .ndo_stop = mvpp2_stop,
5996 .ndo_start_xmit = mvpp2_tx,
5997 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5998 .ndo_set_mac_address = mvpp2_set_mac_address,
5999 .ndo_change_mtu = mvpp2_change_mtu,
6000 .ndo_get_stats64 = mvpp2_get_stats64,
6001 .ndo_do_ioctl = mvpp2_ioctl,
6002 };
6003
6004 static const struct ethtool_ops mvpp2_eth_tool_ops = {
6005 .nway_reset = phy_ethtool_nway_reset,
6006 .get_link = ethtool_op_get_link,
6007 .set_coalesce = mvpp2_ethtool_set_coalesce,
6008 .get_coalesce = mvpp2_ethtool_get_coalesce,
6009 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
6010 .get_ringparam = mvpp2_ethtool_get_ringparam,
6011 .set_ringparam = mvpp2_ethtool_set_ringparam,
6012 .get_link_ksettings = phy_ethtool_get_link_ksettings,
6013 .set_link_ksettings = phy_ethtool_set_link_ksettings,
6014 };
6015
6016 /* Driver initialization */
6017
6018 static void mvpp2_port_power_up(struct mvpp2_port *port)
6019 {
6020 mvpp2_port_mii_set(port);
6021 mvpp2_port_periodic_xon_disable(port);
6022 mvpp2_port_fc_adv_enable(port);
6023 mvpp2_port_reset(port);
6024 }
6025
6026 /* Initialize port HW */
6027 static int mvpp2_port_init(struct mvpp2_port *port)
6028 {
6029 struct device *dev = port->dev->dev.parent;
6030 struct mvpp2 *priv = port->priv;
6031 struct mvpp2_txq_pcpu *txq_pcpu;
6032 int queue, cpu, err;
6033
6034 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
6035 return -EINVAL;
6036
6037 /* Disable port */
6038 mvpp2_egress_disable(port);
6039 mvpp2_port_disable(port);
6040
6041 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
6042 GFP_KERNEL);
6043 if (!port->txqs)
6044 return -ENOMEM;
6045
6046 /* Associate physical Tx queues with this port and initialize them.
6047 * The mapping is predefined.
6048 */
6049 for (queue = 0; queue < txq_number; queue++) {
6050 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
6051 struct mvpp2_tx_queue *txq;
6052
6053 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
6054 if (!txq) {
6055 err = -ENOMEM;
6056 goto err_free_percpu;
6057 }
6058
6059 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
6060 if (!txq->pcpu) {
6061 err = -ENOMEM;
6062 goto err_free_percpu;
6063 }
6064
6065 txq->id = queue_phy_id;
6066 txq->log_id = queue;
6067 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
6068 for_each_present_cpu(cpu) {
6069 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6070 txq_pcpu->cpu = cpu;
6071 }
6072
6073 port->txqs[queue] = txq;
6074 }
6075
6076 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
6077 GFP_KERNEL);
6078 if (!port->rxqs) {
6079 err = -ENOMEM;
6080 goto err_free_percpu;
6081 }
6082
6083 /* Allocate and initialize the Rx queues for this port */
6084 for (queue = 0; queue < rxq_number; queue++) {
6085 struct mvpp2_rx_queue *rxq;
6086
6087 /* Map physical Rx queue to port's logical Rx queue */
6088 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
6089 if (!rxq) {
6090 err = -ENOMEM;
6091 goto err_free_percpu;
6092 }
6093 /* Map this Rx queue to a physical queue */
6094 rxq->id = port->first_rxq + queue;
6095 rxq->port = port->id;
6096 rxq->logic_rxq = queue;
6097
6098 port->rxqs[queue] = rxq;
6099 }
6100
6101 /* Configure Rx queue group interrupt for this port */
6102 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
6103
6104 /* Create Rx descriptor rings */
6105 for (queue = 0; queue < rxq_number; queue++) {
6106 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6107
6108 rxq->size = port->rx_ring_size;
6109 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6110 rxq->time_coal = MVPP2_RX_COAL_USEC;
6111 }
6112
6113 mvpp2_ingress_disable(port);
6114
6115 /* Port default configuration */
6116 mvpp2_defaults_set(port);
6117
6118 /* Port's classifier configuration */
6119 mvpp2_cls_oversize_rxq_set(port);
6120 mvpp2_cls_port_config(port);
6121
6122 /* Provide an initial Rx packet size */
6123 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6124
6125 /* Initialize buffer pools for software forwarding (swf) */
6126 err = mvpp2_swf_bm_pool_init(port);
6127 if (err)
6128 goto err_free_percpu;
6129
6130 return 0;
6131
6132 err_free_percpu:
6133 for (queue = 0; queue < txq_number; queue++) {
6134 if (!port->txqs[queue])
6135 continue;
6136 free_percpu(port->txqs[queue]->pcpu);
6137 }
6138 return err;
6139 }
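/* Illustrative sketch: physical Rx queues are handed out to ports in
 * blocks of rxq_number, so the first probed port maps its logical queues
 * 0..rxq_number-1 onto physical queues 0..rxq_number-1, the next port
 * starts at physical queue rxq_number, and so on. rxq->logic_rxq is the
 * per-port index used in the cause register, while rxq->id is the global
 * physical queue number (port->first_rxq + queue).
 */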
6140
6141 /* Ports initialization */
6142 static int mvpp2_port_probe(struct platform_device *pdev,
6143 struct device_node *port_node,
6144 struct mvpp2 *priv,
6145 int *next_first_rxq)
6146 {
6147 struct device_node *phy_node;
6148 struct mvpp2_port *port;
6149 struct mvpp2_port_pcpu *port_pcpu;
6150 struct net_device *dev;
6151 struct resource *res;
6152 const char *dt_mac_addr;
6153 const char *mac_from;
6154 char hw_mac_addr[ETH_ALEN];
6155 u32 id;
6156 int features;
6157 int phy_mode;
6158 int priv_common_regs_num = 2;
6159 int err, i, cpu;
6160
6161 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6162 rxq_number);
6163 if (!dev)
6164 return -ENOMEM;
6165
6166 phy_node = of_parse_phandle(port_node, "phy", 0);
6167 if (!phy_node) {
6168 dev_err(&pdev->dev, "missing phy\n");
6169 err = -ENODEV;
6170 goto err_free_netdev;
6171 }
6172
6173 phy_mode = of_get_phy_mode(port_node);
6174 if (phy_mode < 0) {
6175 dev_err(&pdev->dev, "incorrect phy mode\n");
6176 err = phy_mode;
6177 goto err_free_netdev;
6178 }
6179
6180 if (of_property_read_u32(port_node, "port-id", &id)) {
6181 err = -EINVAL;
6182 dev_err(&pdev->dev, "missing port-id value\n");
6183 goto err_free_netdev;
6184 }
6185
6186 dev->tx_queue_len = MVPP2_MAX_TXD;
6187 dev->watchdog_timeo = 5 * HZ;
6188 dev->netdev_ops = &mvpp2_netdev_ops;
6189 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6190
6191 port = netdev_priv(dev);
6192
6193 port->irq = irq_of_parse_and_map(port_node, 0);
6194 if (port->irq <= 0) {
6195 err = -EINVAL;
6196 goto err_free_netdev;
6197 }
6198
6199 if (of_property_read_bool(port_node, "marvell,loopback"))
6200 port->flags |= MVPP2_F_LOOPBACK;
6201
6202 port->priv = priv;
6203 port->id = id;
6204 port->first_rxq = *next_first_rxq;
6205 port->phy_node = phy_node;
6206 port->phy_interface = phy_mode;
6207
6208 res = platform_get_resource(pdev, IORESOURCE_MEM,
6209 priv_common_regs_num + id);
6210 port->base = devm_ioremap_resource(&pdev->dev, res);
6211 if (IS_ERR(port->base)) {
6212 err = PTR_ERR(port->base);
6213 goto err_free_irq;
6214 }
6215
6216 /* Alloc per-cpu stats */
6217 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6218 if (!port->stats) {
6219 err = -ENOMEM;
6220 goto err_free_irq;
6221 }
6222
6223 dt_mac_addr = of_get_mac_address(port_node);
6224 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6225 mac_from = "device tree";
6226 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6227 } else {
6228 mvpp2_get_mac_address(port, hw_mac_addr);
6229 if (is_valid_ether_addr(hw_mac_addr)) {
6230 mac_from = "hardware";
6231 ether_addr_copy(dev->dev_addr, hw_mac_addr);
6232 } else {
6233 mac_from = "random";
6234 eth_hw_addr_random(dev);
6235 }
6236 }
6237
6238 port->tx_ring_size = MVPP2_MAX_TXD;
6239 port->rx_ring_size = MVPP2_MAX_RXD;
6240 port->dev = dev;
6241 SET_NETDEV_DEV(dev, &pdev->dev);
6242
6243 err = mvpp2_port_init(port);
6244 if (err < 0) {
6245 dev_err(&pdev->dev, "failed to init port %d\n", id);
6246 goto err_free_stats;
6247 }
6248 mvpp2_port_power_up(port);
6249
6250 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6251 if (!port->pcpu) {
6252 err = -ENOMEM;
6253 goto err_free_txq_pcpu;
6254 }
6255
6256 for_each_present_cpu(cpu) {
6257 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6258
6259 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6260 HRTIMER_MODE_REL_PINNED);
6261 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6262 port_pcpu->timer_scheduled = false;
6263
6264 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6265 (unsigned long)dev);
6266 }
6267
6268 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6269 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6270 dev->features = features | NETIF_F_RXCSUM;
6271 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6272 dev->vlan_features |= features;
6273
6274 /* MTU range: 68 - 9676 */
6275 dev->min_mtu = ETH_MIN_MTU;
6276 /* 9676 == 9700 - 20 and rounding to 8 */
6277 dev->max_mtu = 9676;
6278
6279 err = register_netdev(dev);
6280 if (err < 0) {
6281 dev_err(&pdev->dev, "failed to register netdev\n");
6282 goto err_free_port_pcpu;
6283 }
6284 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6285
6286 /* Increment the first Rx queue number to be used by the next port */
6287 *next_first_rxq += rxq_number;
6288 priv->port_list[id] = port;
6289 return 0;
6290
6291 err_free_port_pcpu:
6292 free_percpu(port->pcpu);
6293 err_free_txq_pcpu:
6294 for (i = 0; i < txq_number; i++)
6295 free_percpu(port->txqs[i]->pcpu);
6296 err_free_stats:
6297 free_percpu(port->stats);
6298 err_free_irq:
6299 irq_dispose_mapping(port->irq);
6300 err_free_netdev:
6301 of_node_put(phy_node);
6302 free_netdev(dev);
6303 return err;
6304 }
6305
6306 /* Port removal routine */
6307 static void mvpp2_port_remove(struct mvpp2_port *port)
6308 {
6309 int i;
6310
6311 unregister_netdev(port->dev);
6312 of_node_put(port->phy_node);
6313 free_percpu(port->pcpu);
6314 free_percpu(port->stats);
6315 for (i = 0; i < txq_number; i++)
6316 free_percpu(port->txqs[i]->pcpu);
6317 irq_dispose_mapping(port->irq);
6318 free_netdev(port->dev);
6319 }
6320
6321 /* Initialize decoding windows */
6322 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6323 struct mvpp2 *priv)
6324 {
6325 u32 win_enable;
6326 int i;
6327
6328 for (i = 0; i < 6; i++) {
6329 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6330 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6331
6332 if (i < 4)
6333 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6334 }
6335
6336 win_enable = 0;
6337
6338 for (i = 0; i < dram->num_cs; i++) {
6339 const struct mbus_dram_window *cs = dram->cs + i;
6340
6341 mvpp2_write(priv, MVPP2_WIN_BASE(i),
6342 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6343 dram->mbus_dram_target_id);
6344
6345 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6346 (cs->size - 1) & 0xffff0000);
6347
6348 win_enable |= (1 << i);
6349 }
6350
6351 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
6352 }
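/* Worked example (hypothetical DRAM layout): a single chip-select at base
 * 0x00000000 with a 1 GiB size, mbus_attr 0x0e and target id 0x0 is
 * programmed as WIN_BASE(0) = 0x00000e00 (base | attr << 8 | target),
 * WIN_SIZE(0) = 0x3fff0000 (size - 1, upper 16 bits only) and
 * MVPP2_BASE_ADDR_ENABLE = 0x1.
 */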
6353
6354 /* Initialize Rx FIFOs */
6355 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6356 {
6357 int port;
6358
6359 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6360 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6361 MVPP2_RX_FIFO_PORT_DATA_SIZE);
6362 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6363 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
6364 }
6365
6366 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6367 MVPP2_RX_FIFO_PORT_MIN_PKT);
6368 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6369 }
6370
6371 /* Initialize the common part of the network controller HW */
6372 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6373 {
6374 const struct mbus_dram_target_info *dram_target_info;
6375 int err, i;
6376 u32 val;
6377
6378 /* Check hardware constraints */
6379 if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
6380 (txq_number > MVPP2_MAX_TXQ)) {
6381 dev_err(&pdev->dev, "invalid queue size parameter\n");
6382 return -EINVAL;
6383 }
6384
6385 /* MBUS windows configuration */
6386 dram_target_info = mv_mbus_dram_info();
6387 if (dram_target_info)
6388 mvpp2_conf_mbus_windows(dram_target_info, priv);
6389
6390 /* Disable HW PHY polling */
6391 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6392 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
6393 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6394
6395 /* Allocate and initialize aggregated TXQs */
6396 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
6397 sizeof(struct mvpp2_tx_queue),
6398 GFP_KERNEL);
6399 if (!priv->aggr_txqs)
6400 return -ENOMEM;
6401
6402 for_each_present_cpu(i) {
6403 priv->aggr_txqs[i].id = i;
6404 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
6405 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
6406 MVPP2_AGGR_TXQ_SIZE, i, priv);
6407 if (err < 0)
6408 return err;
6409 }
6410
6411 /* Rx FIFO init */
6412 mvpp2_rx_fifo_init(priv);
6413
6414 /* Reset Rx queue group interrupt configuration */
6415 for (i = 0; i < MVPP2_MAX_PORTS; i++)
6416 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
6417
6418 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6419 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
6420
6421 /* Allow cache snooping when transmitting packets */
6422 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
6423
6424 /* Buffer Manager initialization */
6425 err = mvpp2_bm_init(pdev, priv);
6426 if (err < 0)
6427 return err;
6428
6429 /* Parser default initialization */
6430 err = mvpp2_prs_default_init(pdev, priv);
6431 if (err < 0)
6432 return err;
6433
6434 /* Classifier default initialization */
6435 mvpp2_cls_init(priv);
6436
6437 return 0;
6438 }
6439
6440 static int mvpp2_probe(struct platform_device *pdev)
6441 {
6442 struct device_node *dn = pdev->dev.of_node;
6443 struct device_node *port_node;
6444 struct mvpp2 *priv;
6445 struct resource *res;
6446 int port_count, first_rxq;
6447 int err;
6448
6449 priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
6450 if (!priv)
6451 return -ENOMEM;
6452
6453 priv->hw_version =
6454 (unsigned long)of_device_get_match_data(&pdev->dev);
6455
6456 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6457 priv->base = devm_ioremap_resource(&pdev->dev, res);
6458 if (IS_ERR(priv->base))
6459 return PTR_ERR(priv->base);
6460
6461 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6462 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
6463 if (IS_ERR(priv->lms_base))
6464 return PTR_ERR(priv->lms_base);
6465
6466 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
6467 if (IS_ERR(priv->pp_clk))
6468 return PTR_ERR(priv->pp_clk);
6469 err = clk_prepare_enable(priv->pp_clk);
6470 if (err < 0)
6471 return err;
6472
6473 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
6474 if (IS_ERR(priv->gop_clk)) {
6475 err = PTR_ERR(priv->gop_clk);
6476 goto err_pp_clk;
6477 }
6478 err = clk_prepare_enable(priv->gop_clk);
6479 if (err < 0)
6480 goto err_pp_clk;
6481
6482 /* Get system's tclk rate */
6483 priv->tclk = clk_get_rate(priv->pp_clk);
6484
6485 /* Initialize network controller */
6486 err = mvpp2_init(pdev, priv);
6487 if (err < 0) {
6488 dev_err(&pdev->dev, "failed to initialize controller\n");
6489 goto err_gop_clk;
6490 }
6491
6492 port_count = of_get_available_child_count(dn);
6493 if (port_count == 0) {
6494 dev_err(&pdev->dev, "no ports enabled\n");
6495 err = -ENODEV;
6496 goto err_gop_clk;
6497 }
6498
6499 priv->port_list = devm_kcalloc(&pdev->dev, port_count,
6500 sizeof(struct mvpp2_port *),
6501 GFP_KERNEL);
6502 if (!priv->port_list) {
6503 err = -ENOMEM;
6504 goto err_gop_clk;
6505 }
6506
6507 /* Initialize ports */
6508 first_rxq = 0;
6509 for_each_available_child_of_node(dn, port_node) {
6510 err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
6511 if (err < 0)
6512 goto err_gop_clk;
6513 }
6514
6515 platform_set_drvdata(pdev, priv);
6516 return 0;
6517
6518 err_gop_clk:
6519 clk_disable_unprepare(priv->gop_clk);
6520 err_pp_clk:
6521 clk_disable_unprepare(priv->pp_clk);
6522 return err;
6523 }
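/* Illustrative device tree fragment (register ranges, interrupt specifiers
 * and phy handles are placeholders, not taken from a real board file)
 * showing the properties consumed by mvpp2_probe() and mvpp2_port_probe():
 * two common register ranges plus one range per port, the pp_clk/gop_clk
 * clocks, and per-port child nodes with interrupt, port-id, phy and
 * phy-mode:
 *
 *	ethernet@f0000 {
 *		compatible = "marvell,armada-375-pp2";
 *		reg = <0xf0000 0xa000>, <0xc0000 0x3060>,
 *		      <0xc4000 0x100>, <0xc5000 0x100>;
 *		clocks = <&gateclk 3>, <&gateclk 19>;
 *		clock-names = "pp_clk", "gop_clk";
 *
 *		ethernet-port@0 {
 *			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
 *			port-id = <0>;
 *			phy = <&phy0>;
 *			phy-mode = "rgmii-id";
 *		};
 *	};
 */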
6524
6525 static int mvpp2_remove(struct platform_device *pdev)
6526 {
6527 struct mvpp2 *priv = platform_get_drvdata(pdev);
6528 struct device_node *dn = pdev->dev.of_node;
6529 struct device_node *port_node;
6530 int i = 0;
6531
6532 for_each_available_child_of_node(dn, port_node) {
6533 if (priv->port_list[i])
6534 mvpp2_port_remove(priv->port_list[i]);
6535 i++;
6536 }
6537
6538 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
6539 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
6540
6541 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
6542 }
6543
6544 for_each_present_cpu(i) {
6545 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
6546
6547 dma_free_coherent(&pdev->dev,
6548 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
6549 aggr_txq->descs,
6550 aggr_txq->descs_dma);
6551 }
6552
6553 clk_disable_unprepare(priv->pp_clk);
6554 clk_disable_unprepare(priv->gop_clk);
6555
6556 return 0;
6557 }
6558
6559 static const struct of_device_id mvpp2_match[] = {
6560 {
6561 .compatible = "marvell,armada-375-pp2",
6562 .data = (void *)MVPP21,
6563 },
6564 { }
6565 };
6566 MODULE_DEVICE_TABLE(of, mvpp2_match);
6567
6568 static struct platform_driver mvpp2_driver = {
6569 .probe = mvpp2_probe,
6570 .remove = mvpp2_remove,
6571 .driver = {
6572 .name = MVPP2_DRIVER_NAME,
6573 .of_match_table = mvpp2_match,
6574 },
6575 };
6576
6577 module_platform_driver(mvpp2_driver);
6578
6579 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
6580 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
6581 MODULE_LICENSE("GPL v2");