/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
#define MVPP2_RX_FIFO_INIT_REG 0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET 5
#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS 20
#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
#define MVPP2_RXQ_POOL_LONG_OFFS 24
#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
#define MVPP2_PRS_PORT_LU_MAX 0xf
#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG 0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG 0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG 0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
#define MVPP2_CLS_PORT_WAY_REG 0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG 0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
#define MVPP2_RXQ_NUM_NEW_OFFSET 16
#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
#define MVPP2_RXQ_THRESH_REG 0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET 0
#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
#define MVPP2_RXQ_INDEX_REG 0x2050
#define MVPP2_TXQ_NUM_REG 0x2080
#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
#define MVPP2_TXQ_THRESH_REG 0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
#define MVPP2_TXQ_INDEX_REG 0x2098
#define MVPP2_TXQ_PREF_BUF_REG 0x209c
#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
#define MVPP2_TXQ_PENDING_REG 0x20a0
#define MVPP2_TXQ_PENDING_MASK 0x3fff
#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE 0x4060

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK BIT(0)
#define MVPP2_BM_STOP_MASK BIT(1)
#define MVPP2_BM_STATE_MASK BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS 8
#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
					MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS 16
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
					 MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
#define MVPP2_BM_VIRT_RLS_REG 0x64c0
#define MVPP2_BM_MC_RLS_REG 0x64c4
#define MVPP2_BM_MC_ID_MASK 0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
#define MVPP2_TXP_SCHED_MTU_REG 0x801c
#define MVPP2_TXP_MTU_MAX 0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG 0x8800
#define MVPP2_TX_PORT_FLUSH_REG 0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE 0x24
#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG 0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \
				       0x400 + (port) * 0x400)
#define MVPP2_MIB_LATE_COLLISION 0x7c
#define MVPP2_ISR_SUM_MASK_REG 0x220c
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG 0x0
#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
#define MVPP2_GMAC_CTRL_1_REG 0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT 6
#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS 7
#define MVPP2_GMAC_CTRL_2_REG 0x8
#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
#define MVPP2_GMAC_FC_ADV_EN BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
					   MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
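
/* For illustration: with an aggregated TXQ of MVPP2_AGGR_TXQ_SIZE = 256
 * descriptors (last_desc = 255), index 254 advances to 255 and index 255
 * wraps back to 0.
 */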

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is automatically filled with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they leave the IP header aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVPP2_MH_SIZE 2
#define MVPP2_ETH_TYPE_LEN 2
#define MVPP2_PPPOE_HDR_SIZE 8
#define MVPP2_VLAN_TAG_LEN 4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE 9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000

#define MVPP2_TX_MTU_MAX 0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT 16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS 4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8

/* Maximum number of RXQs used by single port */
#define MVPP2_MAX_RXQ 8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 4

/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD 1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE 256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE 32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
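
/* A rough sketch of the resulting receive buffer layout (exact sizes
 * depend on the architecture's NET_SKB_PAD and cache line size):
 *
 *   | NET_SKB_PAD | packet data (MVPP2_RX_PKT_SIZE) | skb_shared_info |
 *   |<-------------------- MVPP2_RX_TOTAL_SIZE -------------------->|
 */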

#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16

/* Port flags */
#define MVPP2_F_LOOPBACK BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
#define MVPP2_PRS_SRAM_WORDS 4
#define MVPP2_PRS_FLOW_ID_SIZE 64
#define MVPP2_PRS_FLOW_ID_MASK 0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
#define MVPP2_PRS_IPV4_HEAD 0x40
#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
#define MVPP2_PRS_IPV4_MC 0xe0
#define MVPP2_PRS_IPV4_MC_MASK 0xf0
#define MVPP2_PRS_IPV4_BC_MASK 0xff
#define MVPP2_PRS_IPV4_IHL 0x5
#define MVPP2_PRS_IPV4_IHL_MASK 0xf
#define MVPP2_PRS_IPV6_MC 0xff
#define MVPP2_PRS_IPV6_MC_MASK 0xff
#define MVPP2_PRS_IPV6_HOP_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
#define MVPP2_PRS_DBL_VLANS_MAX 100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS 8
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
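
/* Worked example of the mapping above: the TCAM words pack two data bytes
 * plus their two enable bytes per 32-bit register, so data offsets
 * 0, 1, 2, 3, ... land in tcam.byte[] slots 0, 1, 4, 5, ... while the
 * corresponding enable bytes land in slots 2, 3, 6, 7, ...
 */
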
#define MVPP2_PRS_TCAM_AI_BYTE 16
#define MVPP2_PRS_TCAM_PORT_BYTE 17
#define MVPP2_PRS_TCAM_LU_BYTE 20
#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD 5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL 0
#define MVPP2_PE_FIRST_FREE_TID 1
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
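
/* Note on the IDs above: MVPP2_PE_FIRST_FREE_TID..MVPP2_PE_LAST_FREE_TID
 * is the dynamically allocated range, while the well-known default
 * entries occupy fixed slots at the top of the 256-entry TCAM, ending at
 * MVPP2_PE_MAC_NON_PROMISCUOUS.
 */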

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS 0
#define MVPP2_PRS_SRAM_RI_WORD 0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
#define MVPP2_PRS_SRAM_AI_OFFS 90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
#define MVPP2_PRS_SRAM_AI_MASK 0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
#define MVPP2_PRS_SRAM_LU_GEN_BIT 111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
#define MVPP2_PRS_RI_DSA_MASK 0x2
#define MVPP2_PRS_RI_VLAN_MASK 0xc
#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK 0x600
#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_MCAST BIT(9)
#define MVPP2_PRS_RI_L2_BCAST BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK 0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000
#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_IP4 BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6 BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000
#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
#define MVPP2_PRS_RI_UDF3_MASK 0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
#define MVPP2_PRS_RI_L4_TCP BIT(22)
#define MVPP2_PRS_RI_L4_UDP BIT(23)
#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI 0
#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED true
#define MVPP2_PRS_UNTAGGED false
#define MVPP2_PRS_EDSA true
#define MVPP2_PRS_DSA false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64

/* BM constants */
#define MVPP2_BM_POOLS_NUM 8
#define MVPP2_BM_LONG_BUF_NUM 1024
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL 3

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24
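
/* For example, a cookie describing pool 2 handled by CPU 1 is built as
 * (2 << MVPP2_BM_COOKIE_POOL_OFFS) | (1 << MVPP2_BM_COOKIE_CPU_OFFS):
 * the pool ID lives in bits 8-15 and the CPU ID in bits 24-31.
 */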

/* BM short pool packet size
 * These values assure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
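
/* Sanity check of the arithmetic, assuming the common NET_SKB_PAD of
 * 64 bytes: 512 - 64 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 * is the usable packet size, so pad + packet + shared info add back up
 * to exactly 512 bytes per short buffer.
 */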

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;

	/* Common clocks */
	struct clk *pp_clk;
	struct clk *gop_clk;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;
};

struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};

struct mvpp2_port {
	u8 id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	int pkt_size;

	u32 pending_cause_rx;
	struct napi_struct napi;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and receive DMA descriptors; that layout
 * is therefore dictated by the hardware design.
 */

#define MVPP2_TXD_L3_OFF_SHIFT 0
#define MVPP2_TXD_IP_HLEN_SHIFT 8
#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
#define MVPP2_TXD_PADDING_DISABLE BIT(23)
#define MVPP2_TXD_L4_UDP BIT(24)
#define MVPP2_TXD_L3_IP6 BIT(26)
#define MVPP2_TXD_L_DESC BIT(28)
#define MVPP2_TXD_F_DESC BIT(29)

#define MVPP2_RXD_ERR_SUMMARY BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC 0x0
#define MVPP2_RXD_ERR_OVERRUN BIT(13)
#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS 16
#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC BIT(21)
#define MVPP2_RXD_L4_CSUM_OK BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
#define MVPP2_RXD_L4_TCP BIT(25)
#define MVPP2_RXD_L4_UDP BIT(26)
#define MVPP2_RXD_L3_IP4 BIT(28)
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)

struct mvpp2_tx_desc {
	u32 command; /* Options used by HW for packet transmission. */
	u8 packet_offset; /* the offset from the buffer beginning */
	u8 phys_txq; /* destination queue ID */
	u16 data_size; /* data size of transmitted packet in bytes */
	u32 buf_phys_addr; /* physical addr of transmitted buffer */
	u32 buf_cookie; /* cookie for access to TX buffer in tx path */
	u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2; /* reserved (for future use) */
};

struct mvpp2_rx_desc {
	u32 status; /* info about received packet */
	u16 reserved1; /* parser_info (for future use, PnC) */
	u16 data_size; /* size of received packet in bytes */
	u32 buf_phys_addr; /* physical address of the buffer */
	u32 buf_cookie; /* cookie for access to RX buffer in rx path */
	u16 reserved2; /* gem_port_id (for future use, PON) */
	u16 reserved3; /* csum_l4 (for future use, PnC) */
	u8 reserved4; /* bm_qset (for future use, BM) */
	u8 reserved5;
	u16 reserved6; /* classify_info (for future use, PnC) */
	u32 reserved7; /* flow_id (for future use, PnC) */
	u32 reserved8;
};

struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t phys;

	/* Size transmitted */
	size_t size;
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Info about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE physical base address */
	dma_addr_t phys_addr;

	/* Ports using BM pool */
	u32 port_map;

	/* Occupied buffers indicator */
	atomic_t in_use;
	int in_use_thresh;
};

struct mvpp2_buff_hdr {
	u32 next_buff_phys_addr;
	u32 next_buff_virt_addr;
	u16 byte_count;
	u16 info;
	u8 reserved1; /* bm_qset (for future use, BM) */
};

/* Buffer header info bits */
#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
#define MVPP2_B_HDR_INFO_LAST_OFFS 12
#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
#define MVPP2_B_HDR_INFO_IS_LAST(info) \
	((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_MAX_TXQ;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = tx_desc->data_size;
	tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
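
/* For example, with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8, logical
 * TXQ 2 of Ethernet port 0 maps to physical TXQ (16 + 0) * 8 + 2 = 130;
 * the first 16 slots of the physical port space are reserved for the
 * PON T-CONTs.
 */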

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
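
/* Both the TCAM and the SRAM are accessed indirectly: the entry index is
 * first latched into the IDX register and the data words are then
 * transferred one by one through the corresponding DATA registers, as
 * done above and in mvpp2_prs_hw_read() below.
 */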

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}
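
/* Note the inverted semantics: the TCAM port enable byte stores the
 * complement of the port bitmap, so a cleared enable bit means "this
 * port matches". Passing ports = 0 therefore masks the entry for every
 * port until mvpp2_prs_tcam_port_set() re-enables individual ports.
 */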

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}
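
/* The 16-bit pattern is assembled from two consecutive TCAM data bytes
 * in little-endian order; callers matching network-order fields (e.g.
 * the TPID lookup in mvpp2_prs_vlan_find()) byte-swap the value with
 * swab16() before comparing.
 */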

/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {

		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
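
/* The AI field starts at SRAM bit 90, which is not byte aligned: the
 * value straddles two bytes, so the low bits come from sram.byte[11]
 * shifted down by two and the high bits from sram.byte[12] shifted up
 * by six.
 */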

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
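
/* The shift is stored in sign-magnitude form: a dedicated sign bit plus
 * an unsigned 8-bit magnitude. A shift of -2, for instance, sets
 * MVPP2_PRS_SRAM_SHIFT_SIGN_BIT and stores 2 in the shift value byte.
 */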

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				  (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
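
/* As with the shift above, the UDF offset and opcode fields are not
 * byte aligned, hence the paired mask-and-or statements that patch the
 * bits spilling over into the following SRAM byte.
 */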

/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
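
/* Callers may pass the range in either order; mvpp2_prs_vlan_add(), for
 * instance, passes (MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID)
 * and relies on the swap() above to normalize it before scanning upward.
 */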
1411
1412/* Enable/disable dropping all mac da's */
1413static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1414{
1415 struct mvpp2_prs_entry pe;
1416
1417 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1418 /* Entry exist - update port only */
1419 pe.index = MVPP2_PE_DROP_ALL;
1420 mvpp2_prs_hw_read(priv, &pe);
1421 } else {
1422 /* Entry doesn't exist - create new */
1423 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1424 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1425 pe.index = MVPP2_PE_DROP_ALL;
1426
1427 /* Non-promiscuous mode for all ports - DROP unknown packets */
1428 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1429 MVPP2_PRS_RI_DROP_MASK);
1430
1431 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1432 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1433
1434 /* Update shadow table */
1435 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1436
1437 /* Mask all ports */
1438 mvpp2_prs_tcam_port_map_set(&pe, 0);
1439 }
1440
1441 /* Update port mask */
1442 mvpp2_prs_tcam_port_set(&pe, port, add);
1443
1444 mvpp2_prs_hw_write(priv, &pe);
1445}
1446
1447/* Set port to promiscuous mode */
1448static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1449{
1450 struct mvpp2_prs_entry pe;
1451
dbedd44e 1452 /* Promiscuous mode - Accept unknown packets */
3f518509
MW
1453
1454 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1455 /* Entry exist - update port only */
1456 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1457 mvpp2_prs_hw_read(priv, &pe);
1458 } else {
1459 /* Entry doesn't exist - create new */
1460 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1461 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1462 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1463
1464 /* Continue - set next lookup */
1465 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1466
1467 /* Set result info bits */
1468 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1469 MVPP2_PRS_RI_L2_CAST_MASK);
1470
1471 /* Shift to ethertype */
1472 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1473 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1474
1475 /* Mask all ports */
1476 mvpp2_prs_tcam_port_map_set(&pe, 0);
1477
1478 /* Update shadow table */
1479 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1480 }
1481
1482 /* Update port mask */
1483 mvpp2_prs_tcam_port_set(&pe, port, add);
1484
1485 mvpp2_prs_hw_write(priv, &pe);
1486}
1487
1488/* Accept multicast */
1489static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1490 bool add)
1491{
1492 struct mvpp2_prs_entry pe;
1493 unsigned char da_mc;
1494
1495 /* Ethernet multicast address first byte is
1496 * 0x01 for IPv4 and 0x33 for IPv6
1497 */
1498 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1499
1500 if (priv->prs_shadow[index].valid) {
1501 /* Entry exist - update port only */
1502 pe.index = index;
1503 mvpp2_prs_hw_read(priv, &pe);
1504 } else {
1505 /* Entry doesn't exist - create new */
1506 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1507 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1508 pe.index = index;
1509
1510 /* Continue - set next lookup */
1511 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1512
1513 /* Set result info bits */
1514 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1515 MVPP2_PRS_RI_L2_CAST_MASK);
1516
1517 /* Update tcam entry data first byte */
1518 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1519
1520 /* Shift to ethertype */
1521 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1522 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1523
1524 /* Mask all ports */
1525 mvpp2_prs_tcam_port_map_set(&pe, 0);
1526
1527 /* Update shadow table */
1528 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1529 }
1530
1531 /* Update port mask */
1532 mvpp2_prs_tcam_port_set(&pe, port, add);
1533
1534 mvpp2_prs_hw_write(priv, &pe);
1535}
1536
1537/* Set entry for dsa packets */
1538static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
1539 bool tagged, bool extend)
1540{
1541 struct mvpp2_prs_entry pe;
1542 int tid, shift;
1543
1544 if (extend) {
1545 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
1546 shift = 8;
1547 } else {
1548 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
1549 shift = 4;
1550 }
1551
1552 if (priv->prs_shadow[tid].valid) {
1553 /* Entry exist - update port only */
1554 pe.index = tid;
1555 mvpp2_prs_hw_read(priv, &pe);
1556 } else {
1557 /* Entry doesn't exist - create new */
1558 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1559 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1560 pe.index = tid;
1561
1562 /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/
1563 mvpp2_prs_sram_shift_set(&pe, shift,
1564 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1565
1566 /* Update shadow table */
1567 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1568
1569 if (tagged) {
1570 /* Set tagged bit in DSA tag */
1571 mvpp2_prs_tcam_data_byte_set(&pe, 0,
1572 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1573 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1574 /* Clear all ai bits for next iteration */
1575 mvpp2_prs_sram_ai_update(&pe, 0,
1576 MVPP2_PRS_SRAM_AI_MASK);
1577 /* If packet is tagged continue check vlans */
1578 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1579 } else {
1580 /* Set result info bits to 'no vlans' */
1581 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1582 MVPP2_PRS_RI_VLAN_MASK);
1583 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1584 }
1585
1586 /* Mask all ports */
1587 mvpp2_prs_tcam_port_map_set(&pe, 0);
1588 }
1589
1590 /* Update port mask */
1591 mvpp2_prs_tcam_port_set(&pe, port, add);
1592
1593 mvpp2_prs_hw_write(priv, &pe);
1594}
1595
1596/* Set entry for dsa ethertype */
1597static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1598 bool add, bool tagged, bool extend)
1599{
1600 struct mvpp2_prs_entry pe;
1601 int tid, shift, port_mask;
1602
1603 if (extend) {
1604 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1605 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1606 port_mask = 0;
1607 shift = 8;
1608 } else {
1609 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1610 MVPP2_PE_ETYPE_DSA_UNTAGGED;
1611 port_mask = MVPP2_PRS_PORT_MASK;
1612 shift = 4;
1613 }
1614
1615 if (priv->prs_shadow[tid].valid) {
1616 /* Entry exist - update port only */
1617 pe.index = tid;
1618 mvpp2_prs_hw_read(priv, &pe);
1619 } else {
1620 /* Entry doesn't exist - create new */
1621 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1622 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1623 pe.index = tid;
1624
1625 /* Set ethertype */
1626 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
1627 mvpp2_prs_match_etype(&pe, 2, 0);
1628
1629 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
1630 MVPP2_PRS_RI_DSA_MASK);
1631 /* Shift ethertype + 2 byte reserved + tag*/
1632 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
1633 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1634
1635 /* Update shadow table */
1636 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1637
1638 if (tagged) {
1639 /* Set tagged bit in DSA tag */
1640 mvpp2_prs_tcam_data_byte_set(&pe,
1641 MVPP2_ETH_TYPE_LEN + 2 + 3,
1642 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1643 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1644 /* Clear all ai bits for next iteration */
1645 mvpp2_prs_sram_ai_update(&pe, 0,
1646 MVPP2_PRS_SRAM_AI_MASK);
1647 /* If packet is tagged continue check vlans */
1648 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1649 } else {
1650 /* Set result info bits to 'no vlans' */
1651 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1652 MVPP2_PRS_RI_VLAN_MASK);
1653 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1654 }
1655 /* Mask/unmask all ports, depending on dsa type */
1656 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
1657 }
1658
1659 /* Update port mask */
1660 mvpp2_prs_tcam_port_set(&pe, port, add);
1661
1662 mvpp2_prs_hw_write(priv, &pe);
1663}
1664
1665/* Search for existing single/triple vlan entry */
1666static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
1667 unsigned short tpid, int ai)
1668{
1669 struct mvpp2_prs_entry *pe;
1670 int tid;
1671
1672 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1673 if (!pe)
1674 return NULL;
1675 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1676
1677 /* Go through the all entries with MVPP2_PRS_LU_VLAN */
1678 for (tid = MVPP2_PE_FIRST_FREE_TID;
1679 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1680 unsigned int ri_bits, ai_bits;
1681 bool match;
1682
1683 if (!priv->prs_shadow[tid].valid ||
1684 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1685 continue;
1686
1687 pe->index = tid;
1688
1689 mvpp2_prs_hw_read(priv, pe);
1690 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
1691 if (!match)
1692 continue;
1693
1694 /* Get vlan type */
1695 ri_bits = mvpp2_prs_sram_ri_get(pe);
1696 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1697
1698 /* Get current ai value from tcam */
1699 ai_bits = mvpp2_prs_tcam_ai_get(pe);
1700 /* Clear double vlan bit */
1701 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
1702
1703 if (ai != ai_bits)
1704 continue;
1705
1706 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1707 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1708 return pe;
1709 }
1710 kfree(pe);
1711
1712 return NULL;
1713}
1714
1715/* Add/update single/triple vlan entry */
1716static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1717 unsigned int port_map)
1718{
1719 struct mvpp2_prs_entry *pe;
1720 int tid_aux, tid;
1721	int ret = 0;
1722
1723 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
1724
1725 if (!pe) {
1726 /* Create new tcam entry */
1727 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
1728 MVPP2_PE_FIRST_FREE_TID);
1729 if (tid < 0)
1730 return tid;
1731
1732 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1733 if (!pe)
1734 return -ENOMEM;
1735
1736 /* Get last double vlan tid */
1737 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
1738 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
1739 unsigned int ri_bits;
1740
1741 if (!priv->prs_shadow[tid_aux].valid ||
1742 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1743 continue;
1744
1745 pe->index = tid_aux;
1746 mvpp2_prs_hw_read(priv, pe);
1747 ri_bits = mvpp2_prs_sram_ri_get(pe);
1748 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
1749 MVPP2_PRS_RI_VLAN_DOUBLE)
1750 break;
1751 }
1752
1753 if (tid <= tid_aux) {
1754 ret = -EINVAL;
1755 goto error;
1756 }
1757
1758		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1759 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1760 pe->index = tid;
1761
1762 mvpp2_prs_match_etype(pe, 0, tpid);
1763
1764 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
1765 /* Shift 4 bytes - skip 1 vlan tag */
1766 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
1767 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1768 /* Clear all ai bits for next iteration */
1769 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1770
1771 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
1772 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
1773 MVPP2_PRS_RI_VLAN_MASK);
1774 } else {
1775 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
1776 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
1777 MVPP2_PRS_RI_VLAN_MASK);
1778 }
1779 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
1780
1781 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
1782 }
1783 /* Update ports' mask */
1784 mvpp2_prs_tcam_port_map_set(pe, port_map);
1785
1786 mvpp2_prs_hw_write(priv, pe);
1787
1788error:
1789 kfree(pe);
1790
1791	return ret;
1792}
1793
1794/* Get first free double vlan ai number */
1795static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
1796{
1797 int i;
1798
1799 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
1800 if (!priv->prs_double_vlans[i])
1801 return i;
1802 }
1803
1804 return -EINVAL;
1805}
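/* ai 0 is skipped (the loop starts at i = 1), presumably so that a
 * cleared ai field in a parser entry can never alias a valid double-vlan
 * slot.
 */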
1806
1807/* Search for existing double vlan entry */
1808static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
1809 unsigned short tpid1,
1810 unsigned short tpid2)
1811{
1812 struct mvpp2_prs_entry *pe;
1813 int tid;
1814
1815 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1816 if (!pe)
1817 return NULL;
1818 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1819
1820	/* Go through all entries with MVPP2_PRS_LU_VLAN */
1821 for (tid = MVPP2_PE_FIRST_FREE_TID;
1822 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1823 unsigned int ri_mask;
1824 bool match;
1825
1826 if (!priv->prs_shadow[tid].valid ||
1827 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1828 continue;
1829
1830 pe->index = tid;
1831 mvpp2_prs_hw_read(priv, pe);
1832
1833 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
1834 && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
1835
1836 if (!match)
1837 continue;
1838
1839 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
1840 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
1841 return pe;
1842 }
1843 kfree(pe);
1844
1845 return NULL;
1846}
1847
1848/* Add or update double vlan entry */
1849static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1850 unsigned short tpid2,
1851 unsigned int port_map)
1852{
1853 struct mvpp2_prs_entry *pe;
1854	int tid_aux, tid, ai, ret = 0;
1855
1856 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
1857
1858 if (!pe) {
1859 /* Create new tcam entry */
1860 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1861 MVPP2_PE_LAST_FREE_TID);
1862 if (tid < 0)
1863 return tid;
1864
1865 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1866 if (!pe)
1867 return -ENOMEM;
1868
1869 /* Set ai value for new double vlan entry */
1870 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
1871 if (ai < 0) {
1872 ret = ai;
1873 goto error;
1874 }
1875
1876 /* Get first single/triple vlan tid */
1877 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
1878 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
1879 unsigned int ri_bits;
1880
1881 if (!priv->prs_shadow[tid_aux].valid ||
1882 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1883 continue;
1884
1885 pe->index = tid_aux;
1886 mvpp2_prs_hw_read(priv, pe);
1887 ri_bits = mvpp2_prs_sram_ri_get(pe);
1888 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1889 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1890 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1891 break;
1892 }
1893
1894 if (tid >= tid_aux) {
1895 ret = -ERANGE;
1896 goto error;
1897 }
1898
1899 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1900 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1901 pe->index = tid;
1902
1903 priv->prs_double_vlans[ai] = true;
1904
1905 mvpp2_prs_match_etype(pe, 0, tpid1);
1906 mvpp2_prs_match_etype(pe, 4, tpid2);
1907
1908 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
1909 /* Shift 8 bytes - skip 2 vlan tags */
1910 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
1911 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1912 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1913 MVPP2_PRS_RI_VLAN_MASK);
1914 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
1915 MVPP2_PRS_SRAM_AI_MASK);
1916
1917 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
1918 }
1919
1920 /* Update ports' mask */
1921 mvpp2_prs_tcam_port_map_set(pe, port_map);
1922 mvpp2_prs_hw_write(priv, pe);
1923
1924error:
1925	kfree(pe);
1926	return ret;
1927}
1928
1929/* IPv4 header parsing for fragmentation and L4 offset */
1930static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
1931 unsigned int ri, unsigned int ri_mask)
1932{
1933 struct mvpp2_prs_entry pe;
1934 int tid;
1935
1936 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
1937 (proto != IPPROTO_IGMP))
1938 return -EINVAL;
1939
1940 /* Fragmented packet */
1941 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1942 MVPP2_PE_LAST_FREE_TID);
1943 if (tid < 0)
1944 return tid;
1945
1946 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1947 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1948 pe.index = tid;
1949
1950 /* Set next lu to IPv4 */
1951 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1952 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1953 /* Set L4 offset */
1954 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1955 sizeof(struct iphdr) - 4,
1956 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1957 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1958 MVPP2_PRS_IPV4_DIP_AI_BIT);
1959 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
1960 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
1961
1962 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
1963 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1964 /* Unmask all ports */
1965 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1966
1967 /* Update shadow table and hw entry */
1968 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1969 mvpp2_prs_hw_write(priv, &pe);
1970
1971 /* Not fragmented packet */
1972 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1973 MVPP2_PE_LAST_FREE_TID);
1974 if (tid < 0)
1975 return tid;
1976
1977 pe.index = tid;
1978 /* Clear ri before updating */
1979 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1980 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1981 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
1982
1983 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
1984 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
1985
1986 /* Update shadow table and hw entry */
1987 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1988 mvpp2_prs_hw_write(priv, &pe);
1989
1990 return 0;
1991}
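/* mvpp2_prs_ip4_proto() installs two entries per protocol. Both match the
 * protocol field at TCAM data byte 5 (the parser offset was advanced 4
 * bytes into the IP header, so this lines up with byte 9 of the IPv4
 * header). The first entry flags the packet as fragmented; the second
 * additionally requires bytes 2-3 - the IPv4 flags/fragment-offset field
 * at this offset - to be zero, and clears the fragmentation bit in the
 * result info.
 */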
1992
1993/* IPv4 L3 multicast or broadcast */
1994static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
1995{
1996 struct mvpp2_prs_entry pe;
1997 int mask, tid;
1998
1999 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2000 MVPP2_PE_LAST_FREE_TID);
2001 if (tid < 0)
2002 return tid;
2003
2004 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2005 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2006 pe.index = tid;
2007
2008 switch (l3_cast) {
2009 case MVPP2_PRS_L3_MULTI_CAST:
2010 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2011 MVPP2_PRS_IPV4_MC_MASK);
2012 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2013 MVPP2_PRS_RI_L3_ADDR_MASK);
2014 break;
2015 case MVPP2_PRS_L3_BROAD_CAST:
2016 mask = MVPP2_PRS_IPV4_BC_MASK;
2017 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2018 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2019 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2020 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2021 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2022 MVPP2_PRS_RI_L3_ADDR_MASK);
2023 break;
2024 default:
2025 return -EINVAL;
2026 }
2027
2028 /* Finished: go to flowid generation */
2029 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2030 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2031
2032 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2033 MVPP2_PRS_IPV4_DIP_AI_BIT);
2034 /* Unmask all ports */
2035 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2036
2037 /* Update shadow table and hw entry */
2038 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2039 mvpp2_prs_hw_write(priv, &pe);
2040
2041 return 0;
2042}
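/* For broadcast, all four destination-address bytes are matched against
 * MVPP2_PRS_IPV4_BC_MASK, i.e. the all-ones address 255.255.255.255. For
 * multicast only the first byte is matched, which presumably encodes the
 * 224.0.0.0/4 class-D prefix via MVPP2_PRS_IPV4_MC and its mask.
 */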
2043
2044/* Set entries for protocols over IPv6 */
2045static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2046 unsigned int ri, unsigned int ri_mask)
2047{
2048 struct mvpp2_prs_entry pe;
2049 int tid;
2050
2051 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2052 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2053 return -EINVAL;
2054
2055 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2056 MVPP2_PE_LAST_FREE_TID);
2057 if (tid < 0)
2058 return tid;
2059
2060 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2061 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2062 pe.index = tid;
2063
2064 /* Finished: go to flowid generation */
2065 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2066 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2067 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2068 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2069 sizeof(struct ipv6hdr) - 6,
2070 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2071
2072 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2073 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2074 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2075 /* Unmask all ports */
2076 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2077
2078 /* Write HW */
2079 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2080 mvpp2_prs_hw_write(priv, &pe);
2081
2082 return 0;
2083}
2084
2085/* IPv6 L3 multicast entry */
2086static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2087{
2088 struct mvpp2_prs_entry pe;
2089 int tid;
2090
2091 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2092 return -EINVAL;
2093
2094 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2095 MVPP2_PE_LAST_FREE_TID);
2096 if (tid < 0)
2097 return tid;
2098
2099 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2100 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2101 pe.index = tid;
2102
2103 /* Finished: go to flowid generation */
2104 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2105 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2106 MVPP2_PRS_RI_L3_ADDR_MASK);
2107 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2108 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2109 /* Shift back to IPv6 NH */
2110 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2111
2112 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2113 MVPP2_PRS_IPV6_MC_MASK);
2114 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2115 /* Unmask all ports */
2116 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2117
2118 /* Update shadow table and hw entry */
2119 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2120 mvpp2_prs_hw_write(priv, &pe);
2121
2122 return 0;
2123}
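/* Unlike the IPv4 variant, this entry does not finish parsing: it loops
 * back to MVPP2_PRS_LU_IP6 and rewinds the parser 18 bytes so that the
 * next iteration lands on the IPv6 next-header field again, letting L4
 * classification continue after the multicast address has been flagged.
 */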
2124
2125/* Parser per-port initialization */
2126static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2127 int lu_max, int offset)
2128{
2129 u32 val;
2130
2131 /* Set lookup ID */
2132 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2133 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2134 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2135 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2136
2137 /* Set maximum number of loops for packet received from port */
2138 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2139 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2140 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2141 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2142
2143 /* Set initial offset for packet header extraction for the first
2144 * searching loop
2145 */
2146 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2147 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2148 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2149 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2150}
2151
2152/* Default flow entries initialization for all ports */
2153static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2154{
2155 struct mvpp2_prs_entry pe;
2156 int port;
2157
2158 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2159 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2160 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2161 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2162
2163 /* Mask all ports */
2164 mvpp2_prs_tcam_port_map_set(&pe, 0);
2165
2166		/* Set flow ID */
2167 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2168 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2169
2170 /* Update shadow table and hw entry */
2171 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2172 mvpp2_prs_hw_write(priv, &pe);
2173 }
2174}
2175
2176/* Set default entry for Marvell Header field */
2177static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2178{
2179 struct mvpp2_prs_entry pe;
2180
2181 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2182
2183 pe.index = MVPP2_PE_MH_DEFAULT;
2184 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2185 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2186 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2187 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2188
2189 /* Unmask all ports */
2190 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2191
2192 /* Update shadow table and hw entry */
2193 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2194 mvpp2_prs_hw_write(priv, &pe);
2195}
2196
2197/* Set default entries (placeholder) for promiscuous, non-promiscuous and
2198 * multicast MAC addresses
2199 */
2200static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2201{
2202 struct mvpp2_prs_entry pe;
2203
2204 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2205
2206 /* Non-promiscuous mode for all ports - DROP unknown packets */
2207 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2208 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2209
2210 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2211 MVPP2_PRS_RI_DROP_MASK);
2212 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2213 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2214
2215 /* Unmask all ports */
2216 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2217
2218 /* Update shadow table and hw entry */
2219 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2220 mvpp2_prs_hw_write(priv, &pe);
2221
2222	/* placeholders only - no ports */
2223 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2224 mvpp2_prs_mac_promisc_set(priv, 0, false);
2225 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2226 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2227}
2228
2229/* Set default entries for various types of dsa packets */
2230static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2231{
2232 struct mvpp2_prs_entry pe;
2233
2234	/* Non-tagged EDSA entry - placeholder */
2235 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2236 MVPP2_PRS_EDSA);
2237
2238	/* Tagged EDSA entry - placeholder */
2239 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2240
2241	/* Non-tagged DSA entry - placeholder */
2242 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2243 MVPP2_PRS_DSA);
2244
2245	/* Tagged DSA entry - placeholder */
2246 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2247
2248	/* Non-tagged EDSA ethertype entry - placeholder */
2249 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2250 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2251
2252	/* Tagged EDSA ethertype entry - placeholder */
2253 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2254 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2255
2256	/* Non-tagged DSA ethertype entry */
2257 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2258 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2259
2260 /* Tagged DSA ethertype entry */
2261 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2262 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2263
2264	/* Set default entry, in case the DSA or EDSA tag is not found */
2265 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2266 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2267 pe.index = MVPP2_PE_DSA_DEFAULT;
2268 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2269
2270 /* Shift 0 bytes */
2271 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2272 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2273
2274 /* Clear all sram ai bits for next iteration */
2275 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2276
2277 /* Unmask all ports */
2278 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2279
2280 mvpp2_prs_hw_write(priv, &pe);
2281}
2282
2283/* Match basic ethertypes */
2284static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2285{
2286 struct mvpp2_prs_entry pe;
2287 int tid;
2288
2289 /* Ethertype: PPPoE */
2290 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2291 MVPP2_PE_LAST_FREE_TID);
2292 if (tid < 0)
2293 return tid;
2294
2295 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2296 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2297 pe.index = tid;
2298
2299 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2300
2301 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2302 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2303 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2304 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2305 MVPP2_PRS_RI_PPPOE_MASK);
2306
2307 /* Update shadow table and hw entry */
2308 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2309 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2310 priv->prs_shadow[pe.index].finish = false;
2311 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2312 MVPP2_PRS_RI_PPPOE_MASK);
2313 mvpp2_prs_hw_write(priv, &pe);
2314
2315 /* Ethertype: ARP */
2316 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2317 MVPP2_PE_LAST_FREE_TID);
2318 if (tid < 0)
2319 return tid;
2320
2321 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2322 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2323 pe.index = tid;
2324
2325 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2326
2327	/* Generate flow in the next iteration */
2328 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2329 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2330 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2331 MVPP2_PRS_RI_L3_PROTO_MASK);
2332 /* Set L3 offset */
2333 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2334 MVPP2_ETH_TYPE_LEN,
2335 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2336
2337 /* Update shadow table and hw entry */
2338 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2339 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2340 priv->prs_shadow[pe.index].finish = true;
2341 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2342 MVPP2_PRS_RI_L3_PROTO_MASK);
2343 mvpp2_prs_hw_write(priv, &pe);
2344
2345 /* Ethertype: LBTD */
2346 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2347 MVPP2_PE_LAST_FREE_TID);
2348 if (tid < 0)
2349 return tid;
2350
2351 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2352 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2353 pe.index = tid;
2354
2355 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2356
2357	/* Generate flow in the next iteration */
2358 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2359 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2360 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2361 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2362 MVPP2_PRS_RI_CPU_CODE_MASK |
2363 MVPP2_PRS_RI_UDF3_MASK);
2364 /* Set L3 offset */
2365 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2366 MVPP2_ETH_TYPE_LEN,
2367 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2368
2369 /* Update shadow table and hw entry */
2370 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2371 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2372 priv->prs_shadow[pe.index].finish = true;
2373 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2374 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2375 MVPP2_PRS_RI_CPU_CODE_MASK |
2376 MVPP2_PRS_RI_UDF3_MASK);
2377 mvpp2_prs_hw_write(priv, &pe);
2378
2379 /* Ethertype: IPv4 without options */
2380 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2381 MVPP2_PE_LAST_FREE_TID);
2382 if (tid < 0)
2383 return tid;
2384
2385 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2386 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2387 pe.index = tid;
2388
2389 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2390 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2391 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2392 MVPP2_PRS_IPV4_HEAD_MASK |
2393 MVPP2_PRS_IPV4_IHL_MASK);
2394
2395 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2396 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2397 MVPP2_PRS_RI_L3_PROTO_MASK);
2398 /* Skip eth_type + 4 bytes of IP header */
2399 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2400 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2401 /* Set L3 offset */
2402 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2403 MVPP2_ETH_TYPE_LEN,
2404 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2405
2406 /* Update shadow table and hw entry */
2407 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2408 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2409 priv->prs_shadow[pe.index].finish = false;
2410 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2411 MVPP2_PRS_RI_L3_PROTO_MASK);
2412 mvpp2_prs_hw_write(priv, &pe);
2413
2414 /* Ethertype: IPv4 with options */
2415 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2416 MVPP2_PE_LAST_FREE_TID);
2417 if (tid < 0)
2418 return tid;
2419
2420 pe.index = tid;
2421
2422 /* Clear tcam data before updating */
2423 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2424 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2425
2426 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2427 MVPP2_PRS_IPV4_HEAD,
2428 MVPP2_PRS_IPV4_HEAD_MASK);
2429
2430 /* Clear ri before updating */
2431 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2432 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2433 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2434 MVPP2_PRS_RI_L3_PROTO_MASK);
2435
2436 /* Update shadow table and hw entry */
2437 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2438 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2439 priv->prs_shadow[pe.index].finish = false;
2440 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2441 MVPP2_PRS_RI_L3_PROTO_MASK);
2442 mvpp2_prs_hw_write(priv, &pe);
2443
2444 /* Ethertype: IPv6 without options */
2445 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2446 MVPP2_PE_LAST_FREE_TID);
2447 if (tid < 0)
2448 return tid;
2449
2450 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2451 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2452 pe.index = tid;
2453
2454 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2455
2456 /* Skip DIP of IPV6 header */
2457 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2458 MVPP2_MAX_L3_ADDR_SIZE,
2459 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2460 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2461 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2462 MVPP2_PRS_RI_L3_PROTO_MASK);
2463 /* Set L3 offset */
2464 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2465 MVPP2_ETH_TYPE_LEN,
2466 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2467
2468 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2469 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2470 priv->prs_shadow[pe.index].finish = false;
2471 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2472 MVPP2_PRS_RI_L3_PROTO_MASK);
2473 mvpp2_prs_hw_write(priv, &pe);
2474
2475 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2476 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2477 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2478 pe.index = MVPP2_PE_ETH_TYPE_UN;
2479
2480 /* Unmask all ports */
2481 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2482
2483	/* Generate flow in the next iteration */
2484 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2485 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2486 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2487 MVPP2_PRS_RI_L3_PROTO_MASK);
2488	/* Set L3 offset even if it's unknown L3 */
2489 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2490 MVPP2_ETH_TYPE_LEN,
2491 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2492
2493 /* Update shadow table and hw entry */
2494 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2495 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2496 priv->prs_shadow[pe.index].finish = true;
2497 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2498 MVPP2_PRS_RI_L3_PROTO_MASK);
2499 mvpp2_prs_hw_write(priv, &pe);
2500
2501 return 0;
2502}
2503
2504/* Configure vlan entries and detect up to 2 successive VLAN tags.
2505 * Possible options:
2506 * 0x8100, 0x88A8
2507 * 0x8100, 0x8100
2508 * 0x8100
2509 * 0x88A8
2510 */
2511static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2512{
2513 struct mvpp2_prs_entry pe;
2514 int err;
2515
2516	priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
2517					      MVPP2_PRS_DBL_VLANS_MAX,
2518					      sizeof(bool), GFP_KERNEL);
2519 if (!priv->prs_double_vlans)
2520 return -ENOMEM;
2521
2522 /* Double VLAN: 0x8100, 0x88A8 */
2523 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2524 MVPP2_PRS_PORT_MASK);
2525 if (err)
2526 return err;
2527
2528 /* Double VLAN: 0x8100, 0x8100 */
2529 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2530 MVPP2_PRS_PORT_MASK);
2531 if (err)
2532 return err;
2533
2534 /* Single VLAN: 0x88a8 */
2535 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2536 MVPP2_PRS_PORT_MASK);
2537 if (err)
2538 return err;
2539
2540 /* Single VLAN: 0x8100 */
2541 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2542 MVPP2_PRS_PORT_MASK);
2543 if (err)
2544 return err;
2545
2546 /* Set default double vlan entry */
2547 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2548 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2549 pe.index = MVPP2_PE_VLAN_DBL;
2550
2551 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2552 /* Clear ai for next iterations */
2553 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2554 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2555 MVPP2_PRS_RI_VLAN_MASK);
2556
2557 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2558 MVPP2_PRS_DBL_VLAN_AI_BIT);
2559 /* Unmask all ports */
2560 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2561
2562 /* Update shadow table and hw entry */
2563 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2564 mvpp2_prs_hw_write(priv, &pe);
2565
2566 /* Set default vlan none entry */
2567 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2568 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2569 pe.index = MVPP2_PE_VLAN_NONE;
2570
2571 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2572 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2573 MVPP2_PRS_RI_VLAN_MASK);
2574
2575 /* Unmask all ports */
2576 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2577
2578 /* Update shadow table and hw entry */
2579 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2580 mvpp2_prs_hw_write(priv, &pe);
2581
2582 return 0;
2583}
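/* Summary of the VLAN entries installed above, matching the combinations
 * listed in the function comment:
 *
 *	0x8100 + 0x88A8		-> double-vlan entry
 *	0x8100 + 0x8100		-> double-vlan entry
 *	0x88A8 alone		-> single-vlan entry
 *	0x8100 alone		-> single-vlan entry
 *
 * plus two fixed catch-alls: MVPP2_PE_VLAN_DBL (inner tag of a double
 * pair, selected via MVPP2_PRS_DBL_VLAN_AI_BIT) and MVPP2_PE_VLAN_NONE
 * (untagged traffic).
 */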
2584
2585/* Set entries for PPPoE ethertype */
2586static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2587{
2588 struct mvpp2_prs_entry pe;
2589 int tid;
2590
2591 /* IPv4 over PPPoE with options */
2592 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2593 MVPP2_PE_LAST_FREE_TID);
2594 if (tid < 0)
2595 return tid;
2596
2597 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2598 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2599 pe.index = tid;
2600
2601 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2602
2603 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2604 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2605 MVPP2_PRS_RI_L3_PROTO_MASK);
2606 /* Skip eth_type + 4 bytes of IP header */
2607 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2608 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2609 /* Set L3 offset */
2610 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2611 MVPP2_ETH_TYPE_LEN,
2612 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2613
2614 /* Update shadow table and hw entry */
2615 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2616 mvpp2_prs_hw_write(priv, &pe);
2617
2618 /* IPv4 over PPPoE without options */
2619 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2620 MVPP2_PE_LAST_FREE_TID);
2621 if (tid < 0)
2622 return tid;
2623
2624 pe.index = tid;
2625
2626 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2627 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2628 MVPP2_PRS_IPV4_HEAD_MASK |
2629 MVPP2_PRS_IPV4_IHL_MASK);
2630
2631 /* Clear ri before updating */
2632 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2633 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2634 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2635 MVPP2_PRS_RI_L3_PROTO_MASK);
2636
2637 /* Update shadow table and hw entry */
2638 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2639 mvpp2_prs_hw_write(priv, &pe);
2640
2641 /* IPv6 over PPPoE */
2642 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2643 MVPP2_PE_LAST_FREE_TID);
2644 if (tid < 0)
2645 return tid;
2646
2647 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2648 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2649 pe.index = tid;
2650
2651 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2652
2653 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2654 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2655 MVPP2_PRS_RI_L3_PROTO_MASK);
2656 /* Skip eth_type + 4 bytes of IPv6 header */
2657 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2658 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2659 /* Set L3 offset */
2660 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2661 MVPP2_ETH_TYPE_LEN,
2662 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2663
2664 /* Update shadow table and hw entry */
2665 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2666 mvpp2_prs_hw_write(priv, &pe);
2667
2668 /* Non-IP over PPPoE */
2669 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2670 MVPP2_PE_LAST_FREE_TID);
2671 if (tid < 0)
2672 return tid;
2673
2674 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2675 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2676 pe.index = tid;
2677
2678 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2679 MVPP2_PRS_RI_L3_PROTO_MASK);
2680
2681 /* Finished: go to flowid generation */
2682 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2683 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2684 /* Set L3 offset even if it's unknown L3 */
2685 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2686 MVPP2_ETH_TYPE_LEN,
2687 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2688
2689 /* Update shadow table and hw entry */
2690 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2691 mvpp2_prs_hw_write(priv, &pe);
2692
2693 return 0;
2694}
2695
2696/* Initialize entries for IPv4 */
2697static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2698{
2699 struct mvpp2_prs_entry pe;
2700 int err;
2701
2702 /* Set entries for TCP, UDP and IGMP over IPv4 */
2703 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
2704 MVPP2_PRS_RI_L4_PROTO_MASK);
2705 if (err)
2706 return err;
2707
2708 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
2709 MVPP2_PRS_RI_L4_PROTO_MASK);
2710 if (err)
2711 return err;
2712
2713 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
2714 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2715 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2716 MVPP2_PRS_RI_CPU_CODE_MASK |
2717 MVPP2_PRS_RI_UDF3_MASK);
2718 if (err)
2719 return err;
2720
2721 /* IPv4 Broadcast */
2722 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
2723 if (err)
2724 return err;
2725
2726 /* IPv4 Multicast */
2727 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2728 if (err)
2729 return err;
2730
2731 /* Default IPv4 entry for unknown protocols */
2732 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2733 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2734 pe.index = MVPP2_PE_IP4_PROTO_UN;
2735
2736 /* Set next lu to IPv4 */
2737 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2738 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2739 /* Set L4 offset */
2740 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2741 sizeof(struct iphdr) - 4,
2742 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2743 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2744 MVPP2_PRS_IPV4_DIP_AI_BIT);
2745 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2746 MVPP2_PRS_RI_L4_PROTO_MASK);
2747
2748 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2749 /* Unmask all ports */
2750 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2751
2752 /* Update shadow table and hw entry */
2753 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2754 mvpp2_prs_hw_write(priv, &pe);
2755
2756 /* Default IPv4 entry for unicast address */
2757 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2758 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2759 pe.index = MVPP2_PE_IP4_ADDR_UN;
2760
2761 /* Finished: go to flowid generation */
2762 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2763 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2764 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2765 MVPP2_PRS_RI_L3_ADDR_MASK);
2766
2767 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2768 MVPP2_PRS_IPV4_DIP_AI_BIT);
2769 /* Unmask all ports */
2770 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2771
2772 /* Update shadow table and hw entry */
2773 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2774 mvpp2_prs_hw_write(priv, &pe);
2775
2776 return 0;
2777}
2778
2779/* Initialize entries for IPv6 */
2780static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
2781{
2782 struct mvpp2_prs_entry pe;
2783 int tid, err;
2784
2785 /* Set entries for TCP, UDP and ICMP over IPv6 */
2786 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
2787 MVPP2_PRS_RI_L4_TCP,
2788 MVPP2_PRS_RI_L4_PROTO_MASK);
2789 if (err)
2790 return err;
2791
2792 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
2793 MVPP2_PRS_RI_L4_UDP,
2794 MVPP2_PRS_RI_L4_PROTO_MASK);
2795 if (err)
2796 return err;
2797
2798 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
2799 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2800 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2801 MVPP2_PRS_RI_CPU_CODE_MASK |
2802 MVPP2_PRS_RI_UDF3_MASK);
2803 if (err)
2804 return err;
2805
2806	/* IPv4 is the last header. This is a case similar to 6-TCP or 17-UDP */
2807 /* Result Info: UDF7=1, DS lite */
2808 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
2809 MVPP2_PRS_RI_UDF7_IP6_LITE,
2810 MVPP2_PRS_RI_UDF7_MASK);
2811 if (err)
2812 return err;
2813
2814 /* IPv6 multicast */
2815 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2816 if (err)
2817 return err;
2818
2819 /* Entry for checking hop limit */
2820 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2821 MVPP2_PE_LAST_FREE_TID);
2822 if (tid < 0)
2823 return tid;
2824
2825 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2826 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2827 pe.index = tid;
2828
2829 /* Finished: go to flowid generation */
2830 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2831 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2832 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
2833 MVPP2_PRS_RI_DROP_MASK,
2834 MVPP2_PRS_RI_L3_PROTO_MASK |
2835 MVPP2_PRS_RI_DROP_MASK);
2836
2837 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
2838 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2839 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2840
2841 /* Update shadow table and hw entry */
2842 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2843 mvpp2_prs_hw_write(priv, &pe);
2844
2845 /* Default IPv6 entry for unknown protocols */
2846 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2847 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2848 pe.index = MVPP2_PE_IP6_PROTO_UN;
2849
2850 /* Finished: go to flowid generation */
2851 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2852 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2853 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2854 MVPP2_PRS_RI_L4_PROTO_MASK);
2855	/* Set L4 offset relative to our current place */
2856 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2857 sizeof(struct ipv6hdr) - 4,
2858 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2859
2860 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2861 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2862 /* Unmask all ports */
2863 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2864
2865 /* Update shadow table and hw entry */
2866 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2867 mvpp2_prs_hw_write(priv, &pe);
2868
2869 /* Default IPv6 entry for unknown ext protocols */
2870 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2871 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2872 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
2873
2874 /* Finished: go to flowid generation */
2875 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2876 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2877 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2878 MVPP2_PRS_RI_L4_PROTO_MASK);
2879
2880 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
2881 MVPP2_PRS_IPV6_EXT_AI_BIT);
2882 /* Unmask all ports */
2883 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2884
2885 /* Update shadow table and hw entry */
2886 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2887 mvpp2_prs_hw_write(priv, &pe);
2888
2889 /* Default IPv6 entry for unicast address */
2890 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2891 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2892 pe.index = MVPP2_PE_IP6_ADDR_UN;
2893
2894 /* Finished: go to IPv6 again */
2895 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2896 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2897 MVPP2_PRS_RI_L3_ADDR_MASK);
2898 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2899 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2900 /* Shift back to IPV6 NH */
2901 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2902
2903 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2904 /* Unmask all ports */
2905 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2906
2907 /* Update shadow table and hw entry */
2908 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2909 mvpp2_prs_hw_write(priv, &pe);
2910
2911 return 0;
2912}
2913
2914/* Parser default initialization */
2915static int mvpp2_prs_default_init(struct platform_device *pdev,
2916 struct mvpp2 *priv)
2917{
2918 int err, index, i;
2919
2920 /* Enable tcam table */
2921 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2922
2923 /* Clear all tcam and sram entries */
2924 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2925 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2926 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2927 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2928
2929 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2930 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2931 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2932 }
2933
2934 /* Invalidate all tcam entries */
2935 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2936 mvpp2_prs_hw_inv(priv, index);
2937
2938 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2939 sizeof(struct mvpp2_prs_shadow),
2940 GFP_KERNEL);
2941 if (!priv->prs_shadow)
2942 return -ENOMEM;
2943
2944 /* Always start from lookup = 0 */
2945 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2946 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2947 MVPP2_PRS_PORT_LU_MAX, 0);
2948
2949 mvpp2_prs_def_flow_init(priv);
2950
2951 mvpp2_prs_mh_init(priv);
2952
2953 mvpp2_prs_mac_init(priv);
2954
2955 mvpp2_prs_dsa_init(priv);
2956
2957 err = mvpp2_prs_etype_init(priv);
2958 if (err)
2959 return err;
2960
2961 err = mvpp2_prs_vlan_init(pdev, priv);
2962 if (err)
2963 return err;
2964
2965 err = mvpp2_prs_pppoe_init(priv);
2966 if (err)
2967 return err;
2968
2969 err = mvpp2_prs_ip6_init(priv);
2970 if (err)
2971 return err;
2972
2973 err = mvpp2_prs_ip4_init(priv);
2974 if (err)
2975 return err;
2976
2977 return 0;
2978}
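/* Expected usage (the call site is elsewhere in the driver): this runs
 * once at platform probe time, before any port is configured, since all
 * per-port parser helpers dereference the priv->prs_shadow table
 * allocated here.
 */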
2979
2980/* Compare MAC DA with tcam entry data */
2981static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2982 const u8 *da, unsigned char *mask)
2983{
2984 unsigned char tcam_byte, tcam_mask;
2985 int index;
2986
2987 for (index = 0; index < ETH_ALEN; index++) {
2988 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2989 if (tcam_mask != mask[index])
2990 return false;
2991
2992 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2993 return false;
2994 }
2995
2996 return true;
2997}
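/* The comparison above is done under the entry's own mask: the per-byte
 * masks must be identical, and each TCAM byte only has to agree with the
 * candidate DA on the bits that the entry actually matches.
 */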
2998
2999/* Find tcam entry with matched pair <MAC DA, port> */
3000static struct mvpp2_prs_entry *
3001mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3002 unsigned char *mask, int udf_type)
3003{
3004 struct mvpp2_prs_entry *pe;
3005 int tid;
3006
3007 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3008 if (!pe)
3009 return NULL;
3010 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3011
3012	/* Go through all entries with MVPP2_PRS_LU_MAC */
3013 for (tid = MVPP2_PE_FIRST_FREE_TID;
3014 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3015 unsigned int entry_pmap;
3016
3017 if (!priv->prs_shadow[tid].valid ||
3018 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3019 (priv->prs_shadow[tid].udf != udf_type))
3020 continue;
3021
3022 pe->index = tid;
3023 mvpp2_prs_hw_read(priv, pe);
3024 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3025
3026 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3027 entry_pmap == pmap)
3028 return pe;
3029 }
3030 kfree(pe);
3031
3032 return NULL;
3033}
3034
3035/* Update parser's mac da entry */
3036static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3037 const u8 *da, bool add)
3038{
3039 struct mvpp2_prs_entry *pe;
3040 unsigned int pmap, len, ri;
3041 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3042 int tid;
3043
3044	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
3045 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3046 MVPP2_PRS_UDF_MAC_DEF);
3047
3048 /* No such entry */
3049 if (!pe) {
3050 if (!add)
3051 return 0;
3052
3053 /* Create new TCAM entry */
3054		/* Find first range mac entry */
3055 for (tid = MVPP2_PE_FIRST_FREE_TID;
3056 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3057 if (priv->prs_shadow[tid].valid &&
3058 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3059 (priv->prs_shadow[tid].udf ==
3060 MVPP2_PRS_UDF_MAC_RANGE))
3061 break;
3062
3063		/* Go through all entries from first to last */
3064 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3065 tid - 1);
3066 if (tid < 0)
3067 return tid;
3068
3069 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3070 if (!pe)
3071			return -ENOMEM;
3072 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3073 pe->index = tid;
3074
3075 /* Mask all ports */
3076 mvpp2_prs_tcam_port_map_set(pe, 0);
3077 }
3078
3079 /* Update port mask */
3080 mvpp2_prs_tcam_port_set(pe, port, add);
3081
3082 /* Invalidate the entry if no ports are left enabled */
3083 pmap = mvpp2_prs_tcam_port_map_get(pe);
3084 if (pmap == 0) {
3085 if (add) {
3086 kfree(pe);
3087			return -EINVAL;
3088 }
3089 mvpp2_prs_hw_inv(priv, pe->index);
3090 priv->prs_shadow[pe->index].valid = false;
3091 kfree(pe);
3092 return 0;
3093 }
3094
3095 /* Continue - set next lookup */
3096 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3097
3098 /* Set match on DA */
3099 len = ETH_ALEN;
3100 while (len--)
3101 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3102
3103 /* Set result info bits */
3104 if (is_broadcast_ether_addr(da))
3105 ri = MVPP2_PRS_RI_L2_BCAST;
3106 else if (is_multicast_ether_addr(da))
3107 ri = MVPP2_PRS_RI_L2_MCAST;
3108 else
3109 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3110
3111 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3112 MVPP2_PRS_RI_MAC_ME_MASK);
3113 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3114 MVPP2_PRS_RI_MAC_ME_MASK);
3115
3116 /* Shift to ethertype */
3117 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3118 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3119
3120 /* Update shadow table and hw entry */
3121 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3122 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3123 mvpp2_prs_hw_write(priv, pe);
3124
3125 kfree(pe);
3126
3127 return 0;
3128}
3129
3130static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3131{
3132 struct mvpp2_port *port = netdev_priv(dev);
3133 int err;
3134
3135 /* Remove old parser entry */
3136 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3137 false);
3138 if (err)
3139 return err;
3140
3141 /* Add new parser entry */
3142 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3143 if (err)
3144 return err;
3145
3146 /* Set addr in the device */
3147 ether_addr_copy(dev->dev_addr, da);
3148
3149 return 0;
3150}
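/* Sketch of a typical caller - the function name and ndo hookup below
 * are illustrative assumptions, not code taken from this file:
 *
 *	static int mvpp2_set_mac_address(struct net_device *dev, void *p)
 *	{
 *		const struct sockaddr *addr = p;
 *
 *		if (!is_valid_ether_addr(addr->sa_data))
 *			return -EADDRNOTAVAIL;
 *
 *		return mvpp2_prs_update_mac_da(dev, addr->sa_data);
 *	}
 */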
3151
3152/* Delete all of the port's simple (non-range) multicast entries */
3153static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3154{
3155 struct mvpp2_prs_entry pe;
3156 int index, tid;
3157
3158 for (tid = MVPP2_PE_FIRST_FREE_TID;
3159 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3160 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3161
3162 if (!priv->prs_shadow[tid].valid ||
3163 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3164 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3165 continue;
3166
3167 /* Only simple mac entries */
3168 pe.index = tid;
3169 mvpp2_prs_hw_read(priv, &pe);
3170
3171 /* Read mac addr from entry */
3172 for (index = 0; index < ETH_ALEN; index++)
3173 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3174 &da_mask[index]);
3175
3176 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3177 /* Delete this entry */
3178 mvpp2_prs_mac_da_accept(priv, port, da, false);
3179 }
3180}
3181
3182static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3183{
3184 switch (type) {
3185 case MVPP2_TAG_TYPE_EDSA:
3186 /* Add port to EDSA entries */
3187 mvpp2_prs_dsa_tag_set(priv, port, true,
3188 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3189 mvpp2_prs_dsa_tag_set(priv, port, true,
3190 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3191 /* Remove port from DSA entries */
3192 mvpp2_prs_dsa_tag_set(priv, port, false,
3193 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3194 mvpp2_prs_dsa_tag_set(priv, port, false,
3195 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3196 break;
3197
3198 case MVPP2_TAG_TYPE_DSA:
3199 /* Add port to DSA entries */
3200 mvpp2_prs_dsa_tag_set(priv, port, true,
3201 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3202 mvpp2_prs_dsa_tag_set(priv, port, true,
3203 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3204 /* Remove port from EDSA entries */
3205 mvpp2_prs_dsa_tag_set(priv, port, false,
3206 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3207 mvpp2_prs_dsa_tag_set(priv, port, false,
3208 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3209 break;
3210
3211 case MVPP2_TAG_TYPE_MH:
3212 case MVPP2_TAG_TYPE_NONE:
3213		/* Remove port from EDSA and DSA entries */
3214 mvpp2_prs_dsa_tag_set(priv, port, false,
3215 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3216 mvpp2_prs_dsa_tag_set(priv, port, false,
3217 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3218 mvpp2_prs_dsa_tag_set(priv, port, false,
3219 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3220 mvpp2_prs_dsa_tag_set(priv, port, false,
3221 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3222 break;
3223
3224 default:
3225 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3226 return -EINVAL;
3227 }
3228
3229 return 0;
3230}
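/* DSA and EDSA membership are kept mutually exclusive per port: selecting
 * one tag type always removes the port from the other type's tagged and
 * untagged entries, and MH/NONE removes it from both.
 */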
3231
3232/* Set prs flow for the port */
3233static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3234{
3235 struct mvpp2_prs_entry *pe;
3236 int tid;
3237
3238 pe = mvpp2_prs_flow_find(port->priv, port->id);
3239
3240	/* No such entry exists */
3241 if (!pe) {
3242		/* Go through all entries from last to first */
3243 tid = mvpp2_prs_tcam_first_free(port->priv,
3244 MVPP2_PE_LAST_FREE_TID,
3245 MVPP2_PE_FIRST_FREE_TID);
3246 if (tid < 0)
3247 return tid;
3248
3249 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3250 if (!pe)
3251 return -ENOMEM;
3252
3253 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3254 pe->index = tid;
3255
3256		/* Set flow ID */
3257 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3258 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3259
3260 /* Update shadow table */
3261 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3262 }
3263
3264 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3265 mvpp2_prs_hw_write(port->priv, pe);
3266 kfree(pe);
3267
3268 return 0;
3269}
3270
3271/* Classifier configuration routines */
3272
3273/* Update classification flow table registers */
3274static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3275 struct mvpp2_cls_flow_entry *fe)
3276{
3277 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3278 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3279 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3280 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3281}
3282
3283/* Update classification lookup table register */
3284static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3285 struct mvpp2_cls_lookup_entry *le)
3286{
3287 u32 val;
3288
3289 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3290 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3291 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3292}
3293
3294/* Classifier default initialization */
3295static void mvpp2_cls_init(struct mvpp2 *priv)
3296{
3297 struct mvpp2_cls_lookup_entry le;
3298 struct mvpp2_cls_flow_entry fe;
3299 int index;
3300
3301 /* Enable classifier */
3302 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3303
3304 /* Clear classifier flow table */
3305	memset(&fe.data, 0, sizeof(fe.data));
3306 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3307 fe.index = index;
3308 mvpp2_cls_flow_write(priv, &fe);
3309 }
3310
3311 /* Clear classifier lookup table */
3312 le.data = 0;
3313 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3314 le.lkpid = index;
3315 le.way = 0;
3316 mvpp2_cls_lookup_write(priv, &le);
3317
3318 le.way = 1;
3319 mvpp2_cls_lookup_write(priv, &le);
3320 }
3321}
3322
3323static void mvpp2_cls_port_config(struct mvpp2_port *port)
3324{
3325 struct mvpp2_cls_lookup_entry le;
3326 u32 val;
3327
3328 /* Set way for the port */
3329 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3330 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3331 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3332
3333 /* Pick the entry to be accessed in lookup ID decoding table
3334 * according to the way and lkpid.
3335 */
3336 le.lkpid = port->id;
3337 le.way = 0;
3338 le.data = 0;
3339
3340 /* Set initial CPU queue for receiving packets */
3341 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3342 le.data |= port->first_rxq;
3343
3344 /* Disable classification engines */
3345 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3346
3347 /* Update lookup ID table entry */
3348 mvpp2_cls_lookup_write(port->priv, &le);
3349}
3350
3351/* Set CPU queue number for oversize packets */
3352static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3353{
3354 u32 val;
3355
3356 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3357 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3358
3359 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3360 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3361
3362 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3363 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3364 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3365}
3366
3367/* Buffer Manager configuration routines */
3368
3369/* Create pool */
3370static int mvpp2_bm_pool_create(struct platform_device *pdev,
3371 struct mvpp2 *priv,
3372 struct mvpp2_bm_pool *bm_pool, int size)
3373{
3374 int size_bytes;
3375 u32 val;
3376
3377 size_bytes = sizeof(u32) * size;
3378 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
3379 &bm_pool->phys_addr,
3380 GFP_KERNEL);
3381 if (!bm_pool->virt_addr)
3382 return -ENOMEM;
3383
3384 if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
3385 dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
3386 bm_pool->phys_addr);
3387 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3388 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3389 return -ENOMEM;
3390 }
3391
3392 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3393 bm_pool->phys_addr);
3394 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3395
3396 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3397 val |= MVPP2_BM_START_MASK;
3398 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3399
3400 bm_pool->type = MVPP2_BM_FREE;
3401 bm_pool->size = size;
3402 bm_pool->pkt_size = 0;
3403 bm_pool->buf_num = 0;
3404 atomic_set(&bm_pool->in_use, 0);
3405
3406 return 0;
3407}
3408
3409/* Set pool buffer size */
3410static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3411 struct mvpp2_bm_pool *bm_pool,
3412 int buf_size)
3413{
3414 u32 val;
3415
3416 bm_pool->buf_size = buf_size;
3417
3418 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3419 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3420}
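/* The register value is aligned up to 1 << MVPP2_POOL_BUF_SIZE_OFFSET
 * (32 bytes): a requested buf_size of 1500 would be programmed as 1504,
 * while an already-aligned 1600 is written unchanged. Note that
 * bm_pool->buf_size itself keeps the caller's exact value.
 */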
3421
3422/* Free all buffers from the pool */
3423static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3424			       struct mvpp2_bm_pool *bm_pool)
3425{
3426 int i;
3427
3428	for (i = 0; i < bm_pool->buf_num; i++) {
3429		dma_addr_t buf_phys_addr;
3430 u32 vaddr;
3431
3432		/* Get buffer virtual address (indirect access) */
3433		buf_phys_addr = mvpp2_read(priv,
3434					   MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3435		vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
3436
3437 dma_unmap_single(dev, buf_phys_addr,
3438 bm_pool->buf_size, DMA_FROM_DEVICE);
3439
3440 if (!vaddr)
3441 break;
3442 dev_kfree_skb_any((struct sk_buff *)vaddr);
3443 }
3444
3445 /* Update BM driver with number of buffers removed from pool */
3446 bm_pool->buf_num -= i;
3447}
3448
3449/* Cleanup pool */
3450static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3451 struct mvpp2 *priv,
3452 struct mvpp2_bm_pool *bm_pool)
3453{
3454 u32 val;
3455
3456	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
3457	if (bm_pool->buf_num) {
3458 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3459 return 0;
3460 }
3461
3462 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3463 val |= MVPP2_BM_STOP_MASK;
3464 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3465
3466 dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
3467 bm_pool->virt_addr,
3468 bm_pool->phys_addr);
3469 return 0;
3470}
3471
3472static int mvpp2_bm_pools_init(struct platform_device *pdev,
3473 struct mvpp2 *priv)
3474{
3475 int i, err, size;
3476 struct mvpp2_bm_pool *bm_pool;
3477
3478 /* Create all pools with maximum size */
3479 size = MVPP2_BM_POOL_SIZE_MAX;
3480 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3481 bm_pool = &priv->bm_pools[i];
3482 bm_pool->id = i;
3483 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3484 if (err)
3485 goto err_unroll_pools;
3486 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3487 }
3488 return 0;
3489
3490err_unroll_pools:
3491 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3492 for (i = i - 1; i >= 0; i--)
3493 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3494 return err;
3495}
3496
3497static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3498{
3499 int i, err;
3500
3501 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3502 /* Mask BM all interrupts */
3503 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3504 /* Clear BM cause register */
3505 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3506 }
3507
3508 /* Allocate and initialize BM pools */
3509 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3510 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
3511 if (!priv->bm_pools)
3512 return -ENOMEM;
3513
3514 err = mvpp2_bm_pools_init(pdev, priv);
3515 if (err < 0)
3516 return err;
3517 return 0;
3518}
3519
3520/* Attach long pool to rxq */
3521static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3522 int lrxq, int long_pool)
3523{
3524 u32 val;
3525 int prxq;
3526
3527 /* Get queue physical ID */
3528 prxq = port->rxqs[lrxq]->id;
3529
3530 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3531 val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3532 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
3533 MVPP2_RXQ_POOL_LONG_MASK);
3534
3535 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3536}
3537
3538/* Attach short pool to rxq */
3539static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3540 int lrxq, int short_pool)
3541{
3542 u32 val;
3543 int prxq;
3544
3545 /* Get queue physical ID */
3546 prxq = port->rxqs[lrxq]->id;
3547
3548 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3549 val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3550 val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
3551 MVPP2_RXQ_POOL_SHORT_MASK);
3552
3553 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3554}
3555
3556/* Allocate skb for BM pool */
3557static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
3558 struct mvpp2_bm_pool *bm_pool,
3559 dma_addr_t *buf_phys_addr,
3560 gfp_t gfp_mask)
3561{
3562 struct sk_buff *skb;
3563 dma_addr_t phys_addr;
3564
3565 skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
3566 if (!skb)
3567 return NULL;
3568
3569 phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
3570 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3571 DMA_FROM_DEVICE);
3572 if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
3573 dev_kfree_skb_any(skb);
3574 return NULL;
3575 }
3576 *buf_phys_addr = phys_addr;
3577
3578 return skb;
3579}
3580
3581/* Set pool number in a BM cookie */
3582static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3583{
3584 u32 bm;
3585
3586 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3587 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3588
3589 return bm;
3590}
3591
3592/* Get pool number from a BM cookie */
3593static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
3594{
3595 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
3596}
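/* Example of the cookie bit-packing above (illustrative): assuming an
 * 8-bit pool field at MVPP2_BM_COOKIE_POOL_OFFS,
 *
 *	u32 bm = mvpp2_bm_cookie_pool_set(0, 3);
 *	int pool = mvpp2_bm_cookie_pool_get(bm);	// -> 3
 *
 * mvpp2_bm_cookie_build() below packs the CPU id into a second 8-bit
 * field of the same cookie.
 */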
3597
3598/* Release buffer to BM */
3599static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3600 u32 buf_phys_addr, u32 buf_virt_addr)
3601{
3602 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
3603 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
3604}
3605
3606/* Release multicast buffer */
3607static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
3608 u32 buf_phys_addr, u32 buf_virt_addr,
3609 int mc_id)
3610{
3611 u32 val = 0;
3612
3613 val |= (mc_id & MVPP2_BM_MC_ID_MASK);
3614 mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
3615
3616 mvpp2_bm_pool_put(port, pool,
3617 buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
3618 buf_virt_addr);
3619}
3620
3621/* Refill BM pool */
3622static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
3623 u32 phys_addr, u32 cookie)
3624{
3625 int pool = mvpp2_bm_cookie_pool_get(bm);
3626
3627 mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
3628}
3629
3630/* Allocate buffers for the pool */
3631static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3632 struct mvpp2_bm_pool *bm_pool, int buf_num)
3633{
3634 struct sk_buff *skb;
3635 int i, buf_size, total_size;
3636 u32 bm;
3637 dma_addr_t phys_addr;
3638
3639 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3640 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3641
3642 if (buf_num < 0 ||
3643 (buf_num + bm_pool->buf_num > bm_pool->size)) {
3644 netdev_err(port->dev,
3645 "cannot allocate %d buffers for pool %d\n",
3646 buf_num, bm_pool->id);
3647 return 0;
3648 }
3649
3650 bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
3651 for (i = 0; i < buf_num; i++) {
3652 skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
3653 if (!skb)
3654 break;
3655
3656 mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
3657 }
3658
3659 /* Update BM driver with number of buffers added to pool */
3660 bm_pool->buf_num += i;
3661 bm_pool->in_use_thresh = bm_pool->buf_num / 4;
3662
3663 netdev_dbg(port->dev,
3664 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
3665 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3666 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
3667
3668 netdev_dbg(port->dev,
3669 "%s pool %d: %d of %d buffers added\n",
3670 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3671 bm_pool->id, i, buf_num);
3672 return i;
3673}
3674
3675/* Notify the driver that BM pool is being used as a specific type and return the
3676 * pool pointer on success
3677 */
3678static struct mvpp2_bm_pool *
3679mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3680 int pkt_size)
3681{
3682 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3683 int num;
3684
3685 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
3686 netdev_err(port->dev, "mixing pool types is forbidden\n");
3687 return NULL;
3688 }
3689
3690 if (new_pool->type == MVPP2_BM_FREE)
3691 new_pool->type = type;
3692
3693 /* Allocate buffers in case BM pool is used as long pool, but packet
3694 * size doesn't match MTU or BM pool hasn't been used yet
3695 */
3696 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
3697 (new_pool->pkt_size == 0)) {
3698 int pkts_num;
3699
3700 /* Set default buffer number or free all the buffers in case
3701 * the pool is not empty
3702 */
3703 pkts_num = new_pool->buf_num;
3704 if (pkts_num == 0)
3705 pkts_num = type == MVPP2_BM_SWF_LONG ?
3706 MVPP2_BM_LONG_BUF_NUM :
3707 MVPP2_BM_SHORT_BUF_NUM;
3708 else
3709 mvpp2_bm_bufs_free(port->dev->dev.parent,
3710 port->priv, new_pool);
3711
3712 new_pool->pkt_size = pkt_size;
3713
3714 /* Allocate buffers for this pool */
3715 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
3716 if (num != pkts_num) {
3717 WARN(1, "pool %d: %d of %d allocated\n",
3718 new_pool->id, num, pkts_num);
3719 return NULL;
3720 }
3721 }
3722
3723 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3724 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3725
3726 return new_pool;
3727}
3728
3729/* Initialize pools for swf */
3730static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3731{
3732 int rxq;
3733
3734 if (!port->pool_long) {
3735 port->pool_long =
3736 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
3737 MVPP2_BM_SWF_LONG,
3738 port->pkt_size);
3739 if (!port->pool_long)
3740 return -ENOMEM;
3741
3742		port->pool_long->port_map |= (1 << port->id);
3743
3744 for (rxq = 0; rxq < rxq_number; rxq++)
3745 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
3746 }
3747
3748 if (!port->pool_short) {
3749 port->pool_short =
3750 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
3751 MVPP2_BM_SWF_SHORT,
3752 MVPP2_BM_SHORT_PKT_SIZE);
3753 if (!port->pool_short)
3754 return -ENOMEM;
3755
3756		port->pool_short->port_map |= (1 << port->id);
3757
3758 for (rxq = 0; rxq < rxq_number; rxq++)
3759 mvpp2_rxq_short_pool_set(port, rxq,
3760 port->pool_short->id);
3761 }
3762
3763 return 0;
3764}
3765
3766static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
3767{
3768 struct mvpp2_port *port = netdev_priv(dev);
3769 struct mvpp2_bm_pool *port_pool = port->pool_long;
3770 int num, pkts_num = port_pool->buf_num;
3771 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3772
3773 /* Update BM pool with new buffer size */
3774	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
3775	if (port_pool->buf_num) {
3776 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
3777 return -EIO;
3778 }
3779
3780 port_pool->pkt_size = pkt_size;
3781 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
3782 if (num != pkts_num) {
3783 WARN(1, "pool %d: %d of %d allocated\n",
3784 port_pool->id, num, pkts_num);
3785 return -EIO;
3786 }
3787
3788 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
3789 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
3790 dev->mtu = mtu;
3791 netdev_update_features(dev);
3792 return 0;
3793}
3794
3795static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
3796{
3797 int cpu, cpu_mask = 0;
3798
3799 for_each_present_cpu(cpu)
3800 cpu_mask |= 1 << cpu;
3801 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3802 MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
3803}
3804
3805static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
3806{
3807 int cpu, cpu_mask = 0;
3808
3809 for_each_present_cpu(cpu)
3810 cpu_mask |= 1 << cpu;
3811 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3812 MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
3813}
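/* Illustrative: on a system with two present CPUs the loops above
 * build cpu_mask = 0x3, so a single write of
 * MVPP2_ISR_ENABLE_INTERRUPT(0x3) (or the DISABLE variant) covers
 * CPU0 and CPU1 at once. The CPU count is assumed for the example.
 */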
3814
3815/* Mask the current CPU's Rx/Tx interrupts */
3816static void mvpp2_interrupts_mask(void *arg)
3817{
3818 struct mvpp2_port *port = arg;
3819
3820 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
3821}
3822
3823/* Unmask the current CPU's Rx/Tx interrupts */
3824static void mvpp2_interrupts_unmask(void *arg)
3825{
3826 struct mvpp2_port *port = arg;
3827
3828 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3829 (MVPP2_CAUSE_MISC_SUM_MASK |
3830 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3831}
3832
3833/* Port configuration routines */
3834
3835static void mvpp2_port_mii_set(struct mvpp2_port *port)
3836{
3837	u32 val;
3838
3839 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3840
3841 switch (port->phy_interface) {
3842 case PHY_INTERFACE_MODE_SGMII:
3843 val |= MVPP2_GMAC_INBAND_AN_MASK;
3844 break;
3845 case PHY_INTERFACE_MODE_RGMII:
3846 val |= MVPP2_GMAC_PORT_RGMII_MASK;
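		/* fall through - the PCS enable bit must be cleared for RGMII too */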
3847 default:
3848 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
3849 }
3850
3851 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3852}
3853
3854static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
3855{
3856 u32 val;
3857
3858 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3859 val |= MVPP2_GMAC_FC_ADV_EN;
3860 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3861}
3862
3863static void mvpp2_port_enable(struct mvpp2_port *port)
3864{
3865 u32 val;
3866
3867 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3868 val |= MVPP2_GMAC_PORT_EN_MASK;
3869 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3870 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3871}
3872
3873static void mvpp2_port_disable(struct mvpp2_port *port)
3874{
3875 u32 val;
3876
3877 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3878 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
3879 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3880}
3881
3882/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
3883static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
3884{
3885 u32 val;
3886
3887 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
3888 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3889 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3890}
3891
3892/* Configure loopback port */
3893static void mvpp2_port_loopback_set(struct mvpp2_port *port)
3894{
3895 u32 val;
3896
3897 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3898
3899 if (port->speed == 1000)
3900 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
3901 else
3902 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
3903
3904 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
3905 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
3906 else
3907 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
3908
3909 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3910}
3911
3912static void mvpp2_port_reset(struct mvpp2_port *port)
3913{
3914 u32 val;
3915
3916 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3917 ~MVPP2_GMAC_PORT_RESET_MASK;
3918 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3919
3920 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3921 MVPP2_GMAC_PORT_RESET_MASK)
3922 continue;
3923}
3924
3925/* Change maximum receive size of the port */
3926static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
3927{
3928 u32 val;
3929
3930 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3931 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
3932 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
3933 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
3934 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3935}
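/* Worked example (figures assumed): with a 2-byte Marvell header
 * (MVPP2_MH_SIZE) and pkt_size = 1520, the field is written as
 * (1520 - 2) / 2 = 759, i.e. the GMAC expresses the maximum receive
 * size in units of two bytes.
 */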
3936
3937/* Set defaults to the MVPP2 port */
3938static void mvpp2_defaults_set(struct mvpp2_port *port)
3939{
3940 int tx_port_num, val, queue, ptxq, lrxq;
3941
3942 /* Configure port to loopback if needed */
3943 if (port->flags & MVPP2_F_LOOPBACK)
3944 mvpp2_port_loopback_set(port);
3945
3946 /* Update TX FIFO MIN Threshold */
3947 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3948 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3949 /* Min. TX threshold must be less than minimal packet length */
3950 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3951 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3952
3953 /* Disable Legacy WRR, Disable EJP, Release from reset */
3954 tx_port_num = mvpp2_egress_port(port);
3955 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3956 tx_port_num);
3957 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3958
3959 /* Close bandwidth for all queues */
3960 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
3961 ptxq = mvpp2_txq_phys(port->id, queue);
3962 mvpp2_write(port->priv,
3963 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
3964 }
3965
3966 /* Set refill period to 1 usec, refill tokens
3967 * and bucket size to maximum
3968 */
3969 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
3970 port->priv->tclk / USEC_PER_SEC);
3971 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
3972 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3973 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3974 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3975 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
3976 val = MVPP2_TXP_TOKEN_SIZE_MAX;
3977 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3978
3979 /* Set MaximumLowLatencyPacketSize value to 256 */
3980 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
3981 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3982 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3983
3984 /* Enable Rx cache snoop */
3985 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3986 queue = port->rxqs[lrxq]->id;
3987 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3988 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
3989 MVPP2_SNOOP_BUF_HDR_MASK;
3990 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3991 }
3992
3993	/* By default, mask all interrupts on all present CPUs */
3994 mvpp2_interrupts_disable(port);
3995}
3996
3997/* Enable/disable receiving packets */
3998static void mvpp2_ingress_enable(struct mvpp2_port *port)
3999{
4000 u32 val;
4001 int lrxq, queue;
4002
4003 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4004 queue = port->rxqs[lrxq]->id;
4005 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4006 val &= ~MVPP2_RXQ_DISABLE_MASK;
4007 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4008 }
4009}
4010
4011static void mvpp2_ingress_disable(struct mvpp2_port *port)
4012{
4013 u32 val;
4014 int lrxq, queue;
4015
4016 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4017 queue = port->rxqs[lrxq]->id;
4018 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4019 val |= MVPP2_RXQ_DISABLE_MASK;
4020 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4021 }
4022}
4023
4024/* Enable transmit via physical egress queue
4025 * - HW starts to take descriptors from DRAM
4026 */
4027static void mvpp2_egress_enable(struct mvpp2_port *port)
4028{
4029 u32 qmap;
4030 int queue;
4031 int tx_port_num = mvpp2_egress_port(port);
4032
4033 /* Enable all initialized TXs. */
4034 qmap = 0;
4035 for (queue = 0; queue < txq_number; queue++) {
4036 struct mvpp2_tx_queue *txq = port->txqs[queue];
4037
4038 if (txq->descs != NULL)
4039 qmap |= (1 << queue);
4040 }
4041
4042 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4043 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4044}
4045
4046/* Disable transmit via physical egress queue
4047 * - HW doesn't take descriptors from DRAM
4048 */
4049static void mvpp2_egress_disable(struct mvpp2_port *port)
4050{
4051 u32 reg_data;
4052 int delay;
4053 int tx_port_num = mvpp2_egress_port(port);
4054
4055 /* Issue stop command for active channels only */
4056 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4057 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4058 MVPP2_TXP_SCHED_ENQ_MASK;
4059 if (reg_data != 0)
4060 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4061 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4062
4063 /* Wait for all Tx activity to terminate. */
4064 delay = 0;
4065 do {
4066 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4067 netdev_warn(port->dev,
4068 "Tx stop timed out, status=0x%08x\n",
4069 reg_data);
4070 break;
4071 }
4072 mdelay(1);
4073 delay++;
4074
4075 /* Check port TX Command register that all
4076 * Tx queues are stopped
4077 */
4078 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4079 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4080}
4081
4082/* Rx descriptors helper methods */
4083
4084/* Get number of Rx descriptors occupied by received packets */
4085static inline int
4086mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4087{
4088 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4089
4090 return val & MVPP2_RXQ_OCCUPIED_MASK;
4091}
4092
4093/* Update Rx queue status with the number of occupied and available
4094 * Rx descriptor slots.
4095 */
4096static inline void
4097mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4098 int used_count, int free_count)
4099{
4100	/* Decrement the number of used descriptors and increment
4101	 * the number of free descriptors.
4102 */
4103 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4104
4105 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4106}
4107
4108/* Get pointer to next RX descriptor to be processed by SW */
4109static inline struct mvpp2_rx_desc *
4110mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4111{
4112 int rx_desc = rxq->next_desc_to_proc;
4113
4114 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4115 prefetch(rxq->descs + rxq->next_desc_to_proc);
4116 return rxq->descs + rx_desc;
4117}
4118
4119/* Set rx queue offset */
4120static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4121 int prxq, int offset)
4122{
4123 u32 val;
4124
4125 /* Convert offset from bytes to units of 32 bytes */
4126 offset = offset >> 5;
4127
4128 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4129 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4130
4131	/* Offset is in units of 32 bytes */
4132 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4133 MVPP2_RXQ_PACKET_OFFSET_MASK);
4134
4135 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4136}
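/* Example: mvpp2_rxq_init() below passes NET_SKB_PAD as the offset.
 * Assuming the common NET_SKB_PAD of 64 bytes, 64 >> 5 = 2 units of
 * 32 bytes are programmed. NET_SKB_PAD is architecture-dependent, so
 * the value here is an assumption.
 */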
4137
4138/* Obtain BM cookie information from descriptor */
4139static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
4140{
4141 int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4142 MVPP2_RXD_BM_POOL_ID_OFFS;
4143 int cpu = smp_processor_id();
4144
4145 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
4146 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
4147}
4148
4149/* Tx descriptors helper methods */
4150
4151/* Get number of Tx descriptors waiting to be transmitted by HW */
4152static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
4153 struct mvpp2_tx_queue *txq)
4154{
4155 u32 val;
4156
4157 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4158 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4159
4160 return val & MVPP2_TXQ_PENDING_MASK;
4161}
4162
4163/* Get pointer to next Tx descriptor to be processed (send) by HW */
4164static struct mvpp2_tx_desc *
4165mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4166{
4167 int tx_desc = txq->next_desc_to_proc;
4168
4169 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4170 return txq->descs + tx_desc;
4171}
4172
4173/* Update HW with number of aggregated Tx descriptors to be sent */
4174static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4175{
4176 /* aggregated access - relevant TXQ number is written in TX desc */
4177 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
4178}
4179
4180
4181/* Check if there are enough free descriptors in aggregated txq.
4182 * If not, update the number of occupied descriptors and repeat the check.
4183 */
4184static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4185 struct mvpp2_tx_queue *aggr_txq, int num)
4186{
4187 if ((aggr_txq->count + num) > aggr_txq->size) {
4188 /* Update number of occupied aggregated Tx descriptors */
4189 int cpu = smp_processor_id();
4190 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4191
4192 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4193 }
4194
4195 if ((aggr_txq->count + num) > aggr_txq->size)
4196 return -ENOMEM;
4197
4198 return 0;
4199}
4200
4201/* Reserved Tx descriptors allocation request */
4202static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4203 struct mvpp2_tx_queue *txq, int num)
4204{
4205 u32 val;
4206
4207 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
4208 mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
4209
4210 val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
4211
4212 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4213}
4214
4215/* Check if there are enough reserved descriptors for transmission.
4216 * If not, request chunk of reserved descriptors and check again.
4217 */
4218static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4219 struct mvpp2_tx_queue *txq,
4220 struct mvpp2_txq_pcpu *txq_pcpu,
4221 int num)
4222{
4223 int req, cpu, desc_count;
4224
4225 if (txq_pcpu->reserved_num >= num)
4226 return 0;
4227
4228 /* Not enough descriptors reserved! Update the reserved descriptor
4229 * count and check again.
4230 */
4231
4232 desc_count = 0;
4233 /* Compute total of used descriptors */
4234 for_each_present_cpu(cpu) {
4235 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4236
4237 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4238 desc_count += txq_pcpu_aux->count;
4239 desc_count += txq_pcpu_aux->reserved_num;
4240 }
4241
4242 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4243 desc_count += req;
4244
4245 if (desc_count >
4246 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4247 return -ENOMEM;
4248
4249 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4250
4251	/* OK, the descriptor count has been updated: check again. */
4252 if (txq_pcpu->reserved_num < num)
4253 return -ENOMEM;
4254 return 0;
4255}
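/* Worked example (all figures assumed): with txq->size = 1024, four
 * present CPUs and MVPP2_CPU_DESC_CHUNK = 64, the limit checked above
 * is 1024 - 4 * 64 = 768; a request that would push the summed counts
 * past that returns -ENOMEM rather than over-committing the queue.
 */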
4256
4257/* Release the last allocated Tx descriptor. Useful to handle DMA
4258 * mapping failures in the Tx path.
4259 */
4260static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4261{
4262 if (txq->next_desc_to_proc == 0)
4263 txq->next_desc_to_proc = txq->last_desc - 1;
4264 else
4265 txq->next_desc_to_proc--;
4266}
4267
4268/* Set Tx descriptors fields relevant for CSUM calculation */
4269static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4270 int ip_hdr_len, int l4_proto)
4271{
4272 u32 command;
4273
4274 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4275 * G_L4_chk, L4_type required only for checksum calculation
4276 */
4277 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4278 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4279 command |= MVPP2_TXD_IP_CSUM_DISABLE;
4280
4281 if (l3_proto == swab16(ETH_P_IP)) {
4282 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
4283 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
4284 } else {
4285 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
4286 }
4287
4288 if (l4_proto == IPPROTO_TCP) {
4289 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
4290 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4291 } else if (l4_proto == IPPROTO_UDP) {
4292 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
4293 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4294 } else {
4295 command |= MVPP2_TXD_L4_CSUM_NOT;
4296 }
4297
4298 return command;
4299}
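/* Illustrative use of the helper above for an IPv4/TCP frame with a
 * 14-byte Ethernet header and a 5-word (20-byte) IP header:
 *
 *	cmd = mvpp2_txq_desc_csum(14, swab16(ETH_P_IP), 5, IPPROTO_TCP);
 *
 * The returned command has IPv4 and L4 checksum generation enabled
 * (both "disable" bits cleared). The header sizes are assumptions.
 */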
4300
4301/* Get number of sent descriptors and decrement counter.
4302 * The number of sent descriptors is returned.
4303 * Per-CPU access
4304 */
4305static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4306 struct mvpp2_tx_queue *txq)
4307{
4308 u32 val;
4309
4310 /* Reading status reg resets transmitted descriptor counter */
4311 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
4312
4313 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4314 MVPP2_TRANSMITTED_COUNT_OFFSET;
4315}
4316
4317static void mvpp2_txq_sent_counter_clear(void *arg)
4318{
4319 struct mvpp2_port *port = arg;
4320 int queue;
4321
4322 for (queue = 0; queue < txq_number; queue++) {
4323 int id = port->txqs[queue]->id;
4324
4325 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
4326 }
4327}
4328
4329/* Set max sizes for Tx queues */
4330static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4331{
4332 u32 val, size, mtu;
4333 int txq, tx_port_num;
4334
4335 mtu = port->pkt_size * 8;
4336 if (mtu > MVPP2_TXP_MTU_MAX)
4337 mtu = MVPP2_TXP_MTU_MAX;
4338
4339 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
4340 mtu = 3 * mtu;
4341
4342 /* Indirect access to registers */
4343 tx_port_num = mvpp2_egress_port(port);
4344 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4345
4346 /* Set MTU */
4347 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4348 val &= ~MVPP2_TXP_MTU_MAX;
4349 val |= mtu;
4350 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4351
4352	/* TXP token size and all TXQs token size must be larger than MTU */
4353 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4354 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4355 if (size < mtu) {
4356 size = mtu;
4357 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4358 val |= size;
4359 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4360 }
4361
4362 for (txq = 0; txq < txq_number; txq++) {
4363 val = mvpp2_read(port->priv,
4364 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4365 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4366
4367 if (size < mtu) {
4368 size = mtu;
4369 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4370 val |= size;
4371 mvpp2_write(port->priv,
4372 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4373 val);
4374 }
4375 }
4376}
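/* Worked example (pkt_size assumed): for pkt_size = 1520 bytes the
 * intermediate value is 1520 * 8 = 12160, clamped to MVPP2_TXP_MTU_MAX
 * if larger, then tripled to 36480 by the workaround above; the port
 * and per-queue token sizes are then raised to at least that figure.
 */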
4377
4378/* Set the number of packets that will be received before Rx interrupt
4379 * will be generated by HW.
4380 */
4381static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4382				   struct mvpp2_rx_queue *rxq)
4383{
4384 u32 val;
4385
4386	val = (rxq->pkts_coal & MVPP2_OCCUPIED_THRESH_MASK);
4387 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4388 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
4389}
4390
4391/* Set the time delay in usec before Rx interrupt */
4392static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4393				   struct mvpp2_rx_queue *rxq)
4394{
4395 u32 val;
4396
4397	val = (port->priv->tclk / USEC_PER_SEC) * rxq->time_coal;
4398	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
4399}
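/* Worked example (tclk assumed): with a 250 MHz tclk,
 * tclk / USEC_PER_SEC = 250 ticks per microsecond, so
 * rxq->time_coal = 100 usec programs 25000 clock ticks into
 * MVPP2_ISR_RX_THRESHOLD_REG.
 */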
4400
4401/* Free Tx queue skbuffs */
4402static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4403 struct mvpp2_tx_queue *txq,
4404 struct mvpp2_txq_pcpu *txq_pcpu, int num)
4405{
4406 int i;
4407
4408 for (i = 0; i < num; i++) {
4409 struct mvpp2_txq_pcpu_buf *tx_buf =
4410 txq_pcpu->buffs + txq_pcpu->txq_get_index;
4411
4412 mvpp2_txq_inc_get(txq_pcpu);
4413
4414 dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
4415 tx_buf->size, DMA_TO_DEVICE);
4416 if (!tx_buf->skb)
4417			continue;
4418		dev_kfree_skb_any(tx_buf->skb);
4419 }
4420}
4421
4422static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4423 u32 cause)
4424{
4425 int queue = fls(cause) - 1;
4426
4427 return port->rxqs[queue];
4428}
4429
4430static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4431 u32 cause)
4432{
4433	int queue = fls(cause) - 1;
4434
4435 return port->txqs[queue];
4436}
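/* Example of the decoding shared by both getters (illustrative): for
 * cause = 0b0110 (queues 1 and 2 pending), fls(cause) - 1 = 2, so
 * queue 2 is returned first; the caller clears its bit and loops until
 * cause is empty.
 */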
4437
4438/* Handle end of transmission */
4439static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4440 struct mvpp2_txq_pcpu *txq_pcpu)
4441{
4442 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4443 int tx_done;
4444
4445 if (txq_pcpu->cpu != smp_processor_id())
4446 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
4447
4448 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4449 if (!tx_done)
4450 return;
4451 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4452
4453 txq_pcpu->count -= tx_done;
4454
4455 if (netif_tx_queue_stopped(nq))
4456 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4457 netif_tx_wake_queue(nq);
4458}
4459
4460static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4461{
4462 struct mvpp2_tx_queue *txq;
4463 struct mvpp2_txq_pcpu *txq_pcpu;
4464 unsigned int tx_todo = 0;
4465
4466 while (cause) {
4467 txq = mvpp2_get_tx_queue(port, cause);
4468 if (!txq)
4469 break;
4470
4471 txq_pcpu = this_cpu_ptr(txq->pcpu);
4472
4473 if (txq_pcpu->count) {
4474 mvpp2_txq_done(port, txq, txq_pcpu);
4475 tx_todo += txq_pcpu->count;
4476 }
4477
4478 cause &= ~(1 << txq->log_id);
4479 }
4480 return tx_todo;
4481}
4482
4483/* Rx/Tx queue initialization/cleanup methods */
4484
4485/* Allocate and initialize descriptors for aggr TXQ */
4486static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4487 struct mvpp2_tx_queue *aggr_txq,
4488 int desc_num, int cpu,
4489 struct mvpp2 *priv)
4490{
4491 /* Allocate memory for TX descriptors */
4492 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
4493 desc_num * MVPP2_DESC_ALIGNED_SIZE,
4494 &aggr_txq->descs_phys, GFP_KERNEL);
4495 if (!aggr_txq->descs)
4496 return -ENOMEM;
4497
4498 aggr_txq->last_desc = aggr_txq->size - 1;
4499
4500 /* Aggr TXQ no reset WA */
4501 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4502 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4503
4504 /* Set Tx descriptors queue starting address */
4505 /* indirect access */
4506 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
4507 aggr_txq->descs_phys);
4508 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4509
4510 return 0;
4511}
4512
4513/* Create a specified Rx queue */
4514static int mvpp2_rxq_init(struct mvpp2_port *port,
4515 struct mvpp2_rx_queue *rxq)
4516
4517{
4518 rxq->size = port->rx_ring_size;
4519
4520 /* Allocate memory for RX descriptors */
4521 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
4522 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4523 &rxq->descs_phys, GFP_KERNEL);
4524 if (!rxq->descs)
4525 return -ENOMEM;
4526
4527 rxq->last_desc = rxq->size - 1;
4528
4529 /* Zero occupied and non-occupied counters - direct access */
4530 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4531
4532 /* Set Rx descriptors queue starting address - indirect access */
4533 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4534 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
4535 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4536 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
4537
4538 /* Set Offset */
4539 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4540
4541 /* Set coalescing pkts and time */
4542 mvpp2_rx_pkts_coal_set(port, rxq);
4543 mvpp2_rx_time_coal_set(port, rxq);
4544
4545 /* Add number of descriptors ready for receiving packets */
4546 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4547
4548 return 0;
4549}
4550
4551/* Push packets received by the RXQ to BM pool */
4552static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4553 struct mvpp2_rx_queue *rxq)
4554{
4555 int rx_received, i;
4556
4557 rx_received = mvpp2_rxq_received(port, rxq->id);
4558 if (!rx_received)
4559 return;
4560
4561 for (i = 0; i < rx_received; i++) {
4562 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4563 u32 bm = mvpp2_bm_cookie_build(rx_desc);
4564
4565 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
4566 rx_desc->buf_cookie);
4567 }
4568 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4569}
4570
4571/* Cleanup Rx queue */
4572static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4573 struct mvpp2_rx_queue *rxq)
4574{
4575 mvpp2_rxq_drop_pkts(port, rxq);
4576
4577 if (rxq->descs)
4578 dma_free_coherent(port->dev->dev.parent,
4579 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4580 rxq->descs,
4581 rxq->descs_phys);
4582
4583 rxq->descs = NULL;
4584 rxq->last_desc = 0;
4585 rxq->next_desc_to_proc = 0;
4586 rxq->descs_phys = 0;
4587
4588 /* Clear Rx descriptors queue starting address and size;
4589 * free descriptor number
4590 */
4591 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4592 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4593 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
4594 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
4595}
4596
4597/* Create and initialize a Tx queue */
4598static int mvpp2_txq_init(struct mvpp2_port *port,
4599 struct mvpp2_tx_queue *txq)
4600{
4601 u32 val;
4602 int cpu, desc, desc_per_txq, tx_port_num;
4603 struct mvpp2_txq_pcpu *txq_pcpu;
4604
4605 txq->size = port->tx_ring_size;
4606
4607 /* Allocate memory for Tx descriptors */
4608 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
4609 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4610 &txq->descs_phys, GFP_KERNEL);
4611 if (!txq->descs)
4612 return -ENOMEM;
4613
4614 txq->last_desc = txq->size - 1;
4615
4616 /* Set Tx descriptors queue starting address - indirect access */
4617 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4618 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
4619 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
4620 MVPP2_TXQ_DESC_SIZE_MASK);
4621 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
4622 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
4623 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
4624 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4625 val &= ~MVPP2_TXQ_PENDING_MASK;
4626 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
4627
4628 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
4629 * for each existing TXQ.
4630 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
4631	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
4632 */
4633 desc_per_txq = 16;
4634 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
4635 (txq->log_id * desc_per_txq);
4636
4637 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
4638 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
4639 MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
4640
4641 /* WRR / EJP configuration - indirect access */
4642 tx_port_num = mvpp2_egress_port(port);
4643 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4644
4645 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
4646 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
4647 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
4648 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
4649 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
4650
4651 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
4652 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
4653 val);
4654
4655 for_each_present_cpu(cpu) {
4656 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4657 txq_pcpu->size = txq->size;
4658 txq_pcpu->buffs = kmalloc(txq_pcpu->size *
4659 sizeof(struct mvpp2_txq_pcpu_buf),
4660 GFP_KERNEL);
4661 if (!txq_pcpu->buffs)
4662			goto error;
4663
4664 txq_pcpu->count = 0;
4665 txq_pcpu->reserved_num = 0;
4666 txq_pcpu->txq_put_index = 0;
4667 txq_pcpu->txq_get_index = 0;
4668 }
4669
4670 return 0;
4671
4672error:
4673 for_each_present_cpu(cpu) {
4674 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4675		kfree(txq_pcpu->buffs);
4676 }
4677
4678 dma_free_coherent(port->dev->dev.parent,
4679 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4680 txq->descs, txq->descs_phys);
4681
4682 return -ENOMEM;
4683}
4684
4685/* Free allocated TXQ resources */
4686static void mvpp2_txq_deinit(struct mvpp2_port *port,
4687 struct mvpp2_tx_queue *txq)
4688{
4689 struct mvpp2_txq_pcpu *txq_pcpu;
4690 int cpu;
4691
4692 for_each_present_cpu(cpu) {
4693 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4694		kfree(txq_pcpu->buffs);
4695 }
4696
4697 if (txq->descs)
4698 dma_free_coherent(port->dev->dev.parent,
4699 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4700 txq->descs, txq->descs_phys);
4701
4702 txq->descs = NULL;
4703 txq->last_desc = 0;
4704 txq->next_desc_to_proc = 0;
4705 txq->descs_phys = 0;
4706
4707 /* Set minimum bandwidth for disabled TXQs */
4708 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
4709
4710 /* Set Tx descriptors queue starting address and size */
4711 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4712 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
4713 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
4714}
4715
4716/* Cleanup Tx ports */
4717static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4718{
4719 struct mvpp2_txq_pcpu *txq_pcpu;
4720 int delay, pending, cpu;
4721 u32 val;
4722
4723 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4724 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4725 val |= MVPP2_TXQ_DRAIN_EN_MASK;
4726 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4727
4728 /* The napi queue has been stopped so wait for all packets
4729 * to be transmitted.
4730 */
4731 delay = 0;
4732 do {
4733 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
4734 netdev_warn(port->dev,
4735 "port %d: cleaning queue %d timed out\n",
4736 port->id, txq->log_id);
4737 break;
4738 }
4739 mdelay(1);
4740 delay++;
4741
4742 pending = mvpp2_txq_pend_desc_num_get(port, txq);
4743 } while (pending);
4744
4745 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4746 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4747
4748 for_each_present_cpu(cpu) {
4749 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4750
4751 /* Release all packets */
4752 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
4753
4754 /* Reset queue */
4755 txq_pcpu->count = 0;
4756 txq_pcpu->txq_put_index = 0;
4757 txq_pcpu->txq_get_index = 0;
4758 }
4759}
4760
4761/* Cleanup all Tx queues */
4762static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
4763{
4764 struct mvpp2_tx_queue *txq;
4765 int queue;
4766 u32 val;
4767
4768 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
4769
4770 /* Reset Tx ports and delete Tx queues */
4771 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
4772 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4773
4774 for (queue = 0; queue < txq_number; queue++) {
4775 txq = port->txqs[queue];
4776 mvpp2_txq_clean(port, txq);
4777 mvpp2_txq_deinit(port, txq);
4778 }
4779
4780 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4781
4782 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
4783 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4784}
4785
4786/* Cleanup all Rx queues */
4787static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
4788{
4789 int queue;
4790
4791 for (queue = 0; queue < rxq_number; queue++)
4792 mvpp2_rxq_deinit(port, port->rxqs[queue]);
4793}
4794
4795/* Init all Rx queues for port */
4796static int mvpp2_setup_rxqs(struct mvpp2_port *port)
4797{
4798 int queue, err;
4799
4800 for (queue = 0; queue < rxq_number; queue++) {
4801 err = mvpp2_rxq_init(port, port->rxqs[queue]);
4802 if (err)
4803 goto err_cleanup;
4804 }
4805 return 0;
4806
4807err_cleanup:
4808 mvpp2_cleanup_rxqs(port);
4809 return err;
4810}
4811
4812/* Init all tx queues for port */
4813static int mvpp2_setup_txqs(struct mvpp2_port *port)
4814{
4815 struct mvpp2_tx_queue *txq;
4816 int queue, err;
4817
4818 for (queue = 0; queue < txq_number; queue++) {
4819 txq = port->txqs[queue];
4820 err = mvpp2_txq_init(port, txq);
4821 if (err)
4822 goto err_cleanup;
4823 }
4824
4825 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4826 return 0;
4827
4828err_cleanup:
4829 mvpp2_cleanup_txqs(port);
4830 return err;
4831}
4832
4833/* The callback for per-port interrupt */
4834static irqreturn_t mvpp2_isr(int irq, void *dev_id)
4835{
4836 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
4837
4838 mvpp2_interrupts_disable(port);
4839
4840 napi_schedule(&port->napi);
4841
4842 return IRQ_HANDLED;
4843}
4844
4845/* Adjust link */
4846static void mvpp2_link_event(struct net_device *dev)
4847{
4848 struct mvpp2_port *port = netdev_priv(dev);
4849	struct phy_device *phydev = dev->phydev;
4850 int status_change = 0;
4851 u32 val;
4852
4853 if (phydev->link) {
4854 if ((port->speed != phydev->speed) ||
4855 (port->duplex != phydev->duplex)) {
4856 u32 val;
4857
4858 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4859 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
4860 MVPP2_GMAC_CONFIG_GMII_SPEED |
4861 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
4862 MVPP2_GMAC_AN_SPEED_EN |
4863 MVPP2_GMAC_AN_DUPLEX_EN);
4864
4865 if (phydev->duplex)
4866 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4867
4868 if (phydev->speed == SPEED_1000)
4869 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
4870			else if (phydev->speed == SPEED_100)
4871 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
4872
4873 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4874
4875 port->duplex = phydev->duplex;
4876 port->speed = phydev->speed;
4877 }
4878 }
4879
4880 if (phydev->link != port->link) {
4881 if (!phydev->link) {
4882 port->duplex = -1;
4883 port->speed = 0;
4884 }
4885
4886 port->link = phydev->link;
4887 status_change = 1;
4888 }
4889
4890 if (status_change) {
4891 if (phydev->link) {
4892 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4893 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
4894 MVPP2_GMAC_FORCE_LINK_DOWN);
4895 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4896 mvpp2_egress_enable(port);
4897 mvpp2_ingress_enable(port);
4898 } else {
4899 mvpp2_ingress_disable(port);
4900 mvpp2_egress_disable(port);
4901 }
4902 phy_print_status(phydev);
4903 }
4904}
4905
4906static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
4907{
4908 ktime_t interval;
4909
4910 if (!port_pcpu->timer_scheduled) {
4911 port_pcpu->timer_scheduled = true;
4912		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
4913 hrtimer_start(&port_pcpu->tx_done_timer, interval,
4914 HRTIMER_MODE_REL_PINNED);
4915 }
4916}
4917
4918static void mvpp2_tx_proc_cb(unsigned long data)
4919{
4920 struct net_device *dev = (struct net_device *)data;
4921 struct mvpp2_port *port = netdev_priv(dev);
4922 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
4923 unsigned int tx_todo, cause;
4924
4925 if (!netif_running(dev))
4926 return;
4927 port_pcpu->timer_scheduled = false;
4928
4929 /* Process all the Tx queues */
4930 cause = (1 << txq_number) - 1;
4931 tx_todo = mvpp2_tx_done(port, cause);
4932
4933 /* Set the timer in case not all the packets were processed */
4934 if (tx_todo)
4935 mvpp2_timer_set(port_pcpu);
4936}
4937
4938static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
4939{
4940 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
4941 struct mvpp2_port_pcpu,
4942 tx_done_timer);
4943
4944 tasklet_schedule(&port_pcpu->tx_done_tasklet);
4945
4946 return HRTIMER_NORESTART;
4947}
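/* Deferred Tx completion in short: mvpp2_tx() arms the pinned hrtimer
 * when packets remain below the coalescing threshold, the hrtimer
 * callback above only schedules the tasklet, and mvpp2_tx_proc_cb()
 * reaps the queues and re-arms the timer if work is still pending,
 * keeping the hard-irq path minimal.
 */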
4948
4949/* Main RX/TX processing routines */
4950
4951/* Display more error info */
4952static void mvpp2_rx_error(struct mvpp2_port *port,
4953 struct mvpp2_rx_desc *rx_desc)
4954{
4955 u32 status = rx_desc->status;
4956
4957 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
4958 case MVPP2_RXD_ERR_CRC:
4959 netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
4960 status, rx_desc->data_size);
4961 break;
4962 case MVPP2_RXD_ERR_OVERRUN:
4963 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
4964 status, rx_desc->data_size);
4965 break;
4966 case MVPP2_RXD_ERR_RESOURCE:
4967 netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
4968 status, rx_desc->data_size);
4969 break;
4970 }
4971}
4972
4973/* Handle RX checksum offload */
4974static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
4975 struct sk_buff *skb)
4976{
4977 if (((status & MVPP2_RXD_L3_IP4) &&
4978 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
4979 (status & MVPP2_RXD_L3_IP6))
4980 if (((status & MVPP2_RXD_L4_UDP) ||
4981 (status & MVPP2_RXD_L4_TCP)) &&
4982 (status & MVPP2_RXD_L4_CSUM_OK)) {
4983 skb->csum = 0;
4984 skb->ip_summed = CHECKSUM_UNNECESSARY;
4985 return;
4986 }
4987
4988 skb->ip_summed = CHECKSUM_NONE;
4989}
4990
4991/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
4992static int mvpp2_rx_refill(struct mvpp2_port *port,
4993 struct mvpp2_bm_pool *bm_pool,
4994 u32 bm, int is_recycle)
4995{
4996 struct sk_buff *skb;
4997 dma_addr_t phys_addr;
4998
4999 if (is_recycle &&
5000 (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
5001 return 0;
5002
5003 /* No recycle or too many buffers are in use, so allocate a new skb */
5004 skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
5005 if (!skb)
5006 return -ENOMEM;
5007
5008 mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
5009 atomic_dec(&bm_pool->in_use);
5010 return 0;
5011}
5012
5013/* Handle tx checksum */
5014static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5015{
5016 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5017 int ip_hdr_len = 0;
5018 u8 l4_proto;
5019
5020 if (skb->protocol == htons(ETH_P_IP)) {
5021 struct iphdr *ip4h = ip_hdr(skb);
5022
5023 /* Calculate IPv4 checksum and L4 checksum */
5024 ip_hdr_len = ip4h->ihl;
5025 l4_proto = ip4h->protocol;
5026 } else if (skb->protocol == htons(ETH_P_IPV6)) {
5027 struct ipv6hdr *ip6h = ipv6_hdr(skb);
5028
5029 /* Read l4_protocol from one of IPv6 extra headers */
5030 if (skb_network_header_len(skb) > 0)
5031 ip_hdr_len = (skb_network_header_len(skb) >> 2);
5032 l4_proto = ip6h->nexthdr;
5033 } else {
5034 return MVPP2_TXD_L4_CSUM_NOT;
5035 }
5036
5037 return mvpp2_txq_desc_csum(skb_network_offset(skb),
5038 skb->protocol, ip_hdr_len, l4_proto);
5039 }
5040
5041 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5042}
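/* Illustrative expansion for a plain IPv4/TCP skb with ihl = 5:
 *
 *	mvpp2_txq_desc_csum(skb_network_offset(skb),
 *			    htons(ETH_P_IP), 5, IPPROTO_TCP);
 *
 * For protocols other than TCP/UDP over IPv4/IPv6 the hardware
 * checksum is disabled and the stack's checksum is used instead.
 */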
5043
5044static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
5045 struct mvpp2_rx_desc *rx_desc)
5046{
5047 struct mvpp2_buff_hdr *buff_hdr;
5048 struct sk_buff *skb;
5049 u32 rx_status = rx_desc->status;
5050 u32 buff_phys_addr;
5051 u32 buff_virt_addr;
5052 u32 buff_phys_addr_next;
5053 u32 buff_virt_addr_next;
5054 int mc_id;
5055 int pool_id;
5056
5057 pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5058 MVPP2_RXD_BM_POOL_ID_OFFS;
5059 buff_phys_addr = rx_desc->buf_phys_addr;
5060 buff_virt_addr = rx_desc->buf_cookie;
5061
5062 do {
5063 skb = (struct sk_buff *)buff_virt_addr;
5064 buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
5065
5066 mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
5067
5068 buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
5069 buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
5070
5071 /* Release buffer */
5072 mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
5073 buff_virt_addr, mc_id);
5074
5075 buff_phys_addr = buff_phys_addr_next;
5076 buff_virt_addr = buff_virt_addr_next;
5077
5078 } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
5079}
5080
5081/* Main rx processing */
5082static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5083 struct mvpp2_rx_queue *rxq)
5084{
5085 struct net_device *dev = port->dev;
5086 int rx_received;
5087 int rx_done = 0;
5088 u32 rcvd_pkts = 0;
5089 u32 rcvd_bytes = 0;
5090
5091	/* Get number of received packets and clamp the to-do count */
5092 rx_received = mvpp2_rxq_received(port, rxq->id);
5093 if (rx_todo > rx_received)
5094 rx_todo = rx_received;
5095
5096	while (rx_done < rx_todo) {
5097 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5098 struct mvpp2_bm_pool *bm_pool;
5099 struct sk_buff *skb;
5100		dma_addr_t phys_addr;
5101 u32 bm, rx_status;
5102 int pool, rx_bytes, err;
5103
5104		rx_done++;
5105 rx_status = rx_desc->status;
5106 rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
5107		phys_addr = rx_desc->buf_phys_addr;
5108
5109 bm = mvpp2_bm_cookie_build(rx_desc);
5110 pool = mvpp2_bm_cookie_pool_get(bm);
5111 bm_pool = &port->priv->bm_pools[pool];
5112 /* Check if buffer header is used */
5113 if (rx_status & MVPP2_RXD_BUF_HDR) {
5114 mvpp2_buff_hdr_rx(port, rx_desc);
5115 continue;
5116 }
5117
5118 /* In case of an error, release the requested buffer pointer
5119 * to the Buffer Manager. This request process is controlled
5120		 * by the hardware, and the information about the buffer is
5121		 * carried in the RX descriptor.
5122 */
5123 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5124 err_drop_frame:
5125 dev->stats.rx_errors++;
5126 mvpp2_rx_error(port, rx_desc);
5127			/* Return the buffer to the pool */
5128 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
5129 rx_desc->buf_cookie);
5130 continue;
5131 }
5132
5133 skb = (struct sk_buff *)rx_desc->buf_cookie;
5134
5135 err = mvpp2_rx_refill(port, bm_pool, bm, 0);
5136 if (err) {
5137 netdev_err(port->dev, "failed to refill BM pools\n");
5138 goto err_drop_frame;
5139 }
5140
5141 dma_unmap_single(dev->dev.parent, phys_addr,
5142 bm_pool->buf_size, DMA_FROM_DEVICE);
5143
5144 rcvd_pkts++;
5145 rcvd_bytes += rx_bytes;
5146 atomic_inc(&bm_pool->in_use);
5147
5148 skb_reserve(skb, MVPP2_MH_SIZE);
5149 skb_put(skb, rx_bytes);
5150 skb->protocol = eth_type_trans(skb, dev);
5151 mvpp2_rx_csum(port, rx_status, skb);
5152
5153 napi_gro_receive(&port->napi, skb);
5154 }
5155
5156 if (rcvd_pkts) {
5157 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5158
5159 u64_stats_update_begin(&stats->syncp);
5160 stats->rx_packets += rcvd_pkts;
5161 stats->rx_bytes += rcvd_bytes;
5162 u64_stats_update_end(&stats->syncp);
5163 }
5164
5165 /* Update Rx queue management counters */
5166 wmb();
5167	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
5168
5169 return rx_todo;
5170}
5171
5172static inline void
5173tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
5174 struct mvpp2_tx_desc *desc)
5175{
5176 dma_unmap_single(dev, desc->buf_phys_addr,
5177 desc->data_size, DMA_TO_DEVICE);
5178 mvpp2_txq_desc_put(txq);
5179}
5180
5181/* Handle tx fragmentation processing */
5182static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5183 struct mvpp2_tx_queue *aggr_txq,
5184 struct mvpp2_tx_queue *txq)
5185{
5186 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5187 struct mvpp2_tx_desc *tx_desc;
5188 int i;
5189 dma_addr_t buf_phys_addr;
5190
5191 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5192 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5193 void *addr = page_address(frag->page.p) + frag->page_offset;
5194
5195 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5196 tx_desc->phys_txq = txq->id;
5197 tx_desc->data_size = frag->size;
5198
5199 buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
5200 tx_desc->data_size,
5201 DMA_TO_DEVICE);
5202 if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
5203 mvpp2_txq_desc_put(txq);
5204 goto error;
5205 }
5206
5207 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5208 tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
5209
5210 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5211 /* Last descriptor */
5212 tx_desc->command = MVPP2_TXD_L_DESC;
5213			mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5214 } else {
5215 /* Descriptor in the middle: Not First, Not Last */
5216 tx_desc->command = 0;
5217			mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5218 }
5219 }
5220
5221 return 0;
5222
5223error:
5224 /* Release all descriptors that were used to map fragments of
5225 * this packet, as well as the corresponding DMA mappings
5226 */
5227 for (i = i - 1; i >= 0; i--) {
5228 tx_desc = txq->descs + i;
5229 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5230 }
5231
5232 return -ENOMEM;
5233}
5234
5235/* Main tx processing */
5236static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5237{
5238 struct mvpp2_port *port = netdev_priv(dev);
5239 struct mvpp2_tx_queue *txq, *aggr_txq;
5240 struct mvpp2_txq_pcpu *txq_pcpu;
5241 struct mvpp2_tx_desc *tx_desc;
5242 dma_addr_t buf_phys_addr;
5243 int frags = 0;
5244 u16 txq_id;
5245 u32 tx_cmd;
5246
5247 txq_id = skb_get_queue_mapping(skb);
5248 txq = port->txqs[txq_id];
5249 txq_pcpu = this_cpu_ptr(txq->pcpu);
5250 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5251
5252 frags = skb_shinfo(skb)->nr_frags + 1;
5253
5254 /* Check number of available descriptors */
5255 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5256 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5257 txq_pcpu, frags)) {
5258 frags = 0;
5259 goto out;
5260 }
5261
5262 /* Get a descriptor for the first part of the packet */
5263 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5264 tx_desc->phys_txq = txq->id;
5265 tx_desc->data_size = skb_headlen(skb);
5266
5267 buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
5268 tx_desc->data_size, DMA_TO_DEVICE);
5269 if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
5270 mvpp2_txq_desc_put(txq);
5271 frags = 0;
5272 goto out;
5273 }
5274 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5275 tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
5276
5277 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5278
5279 if (frags == 1) {
5280 /* First and Last descriptor */
5281 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5282 tx_desc->command = tx_cmd;
5283		mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5284 } else {
5285 /* First but not Last */
5286 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5287 tx_desc->command = tx_cmd;
5288		mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5289
5290 /* Continue with other skb fragments */
5291 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
5292 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5293 frags = 0;
5294 goto out;
5295 }
5296 }
5297
5298 txq_pcpu->reserved_num -= frags;
5299 txq_pcpu->count += frags;
5300 aggr_txq->count += frags;
5301
5302	/* Ensure descriptor writes complete before telling the HW to transmit */
5303 wmb();
5304 mvpp2_aggr_txq_pend_desc_add(port, frags);
5305
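	/* Stop the queue when a maximally fragmented skb could no longer fit */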
5306 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5307 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5308
5309 netif_tx_stop_queue(nq);
5310 }
5311out:
5312 if (frags > 0) {
5313 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5314
5315 u64_stats_update_begin(&stats->syncp);
5316 stats->tx_packets++;
5317 stats->tx_bytes += skb->len;
5318 u64_stats_update_end(&stats->syncp);
5319 } else {
5320 dev->stats.tx_dropped++;
5321 dev_kfree_skb_any(skb);
5322 }
5323
5324 /* Finalize TX processing */
5325 if (txq_pcpu->count >= txq->done_pkts_coal)
5326 mvpp2_txq_done(port, txq, txq_pcpu);
5327
5328 /* Set the timer in case not all frags were processed */
5329 if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5330 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5331
5332 mvpp2_timer_set(port_pcpu);
5333 }
5334
5335 return NETDEV_TX_OK;
5336}
5337
5338static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5339{
5340 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5341 netdev_err(dev, "FCS error\n");
5342 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5343 netdev_err(dev, "rx fifo overrun error\n");
5344 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5345 netdev_err(dev, "tx fifo underrun error\n");
5346}
5347
5348static int mvpp2_poll(struct napi_struct *napi, int budget)
5349{
5350 u32 cause_rx_tx, cause_rx, cause_misc;
5351 int rx_done = 0;
5352 struct mvpp2_port *port = netdev_priv(napi->dev);
5353
5354 /* Rx/Tx cause register
5355 *
5356 * Bits 0-15: each bit indicates received packets on the Rx queue
5357 * (bit 0 is for Rx queue 0).
5358 *
5359 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5360 * (bit 16 is for Tx queue 0).
5361 *
5362 * Each CPU has its own Rx/Tx cause register
5363 */
5364 cause_rx_tx = mvpp2_read(port->priv,
5365 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
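	/* TX-done is reclaimed from the per-CPU hrtimer path, so mask out the TXQ cause bits */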
5366	cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5367 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5368
5369 if (cause_misc) {
5370 mvpp2_cause_error(port->dev, cause_misc);
5371
5372 /* Clear the cause register */
5373 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
5374 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5375 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5376 }
5377
5378 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5379
5380 /* Process RX packets */
5381 cause_rx |= port->pending_cause_rx;
5382 while (cause_rx && budget > 0) {
5383 int count;
5384 struct mvpp2_rx_queue *rxq;
5385
5386 rxq = mvpp2_get_rx_queue(port, cause_rx);
5387 if (!rxq)
5388 break;
5389
5390 count = mvpp2_rx(port, budget, rxq);
5391 rx_done += count;
5392 budget -= count;
5393 if (budget > 0) {
5394			/* Clear the bit associated with this Rx queue
5395 * so that next iteration will continue from
5396 * the next Rx queue.
5397 */
5398 cause_rx &= ~(1 << rxq->logic_rxq);
5399 }
5400 }
5401
5402 if (budget > 0) {
5403 cause_rx = 0;
5404		napi_complete_done(napi, rx_done);
5405
5406 mvpp2_interrupts_enable(port);
5407 }
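	/* Remember any unserviced RX queues; the next poll resumes with them */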
5408 port->pending_cause_rx = cause_rx;
5409 return rx_done;
5410}
5411
5412/* Set hw internals when starting port */
5413static void mvpp2_start_dev(struct mvpp2_port *port)
5414{
5415 struct net_device *ndev = port->dev;
5416
5417 mvpp2_gmac_max_rx_size_set(port);
5418 mvpp2_txp_max_tx_size_set(port);
5419
5420 napi_enable(&port->napi);
5421
5422 /* Enable interrupts on all CPUs */
5423 mvpp2_interrupts_enable(port);
5424
5425 mvpp2_port_enable(port);
5426	phy_start(ndev->phydev);
5427 netif_tx_start_all_queues(port->dev);
5428}
5429
5430/* Set hw internals when stopping port */
5431static void mvpp2_stop_dev(struct mvpp2_port *port)
5432{
5433 struct net_device *ndev = port->dev;
5434
5435	/* Stop new packets from arriving at the RXQs */
5436 mvpp2_ingress_disable(port);
5437
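	/* Wait briefly so in-flight packets can drain */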
5438 mdelay(10);
5439
5440 /* Disable interrupts on all CPUs */
5441 mvpp2_interrupts_disable(port);
5442
5443 napi_disable(&port->napi);
5444
5445 netif_carrier_off(port->dev);
5446 netif_tx_stop_all_queues(port->dev);
5447
5448 mvpp2_egress_disable(port);
5449 mvpp2_port_disable(port);
5450	phy_stop(ndev->phydev);
5451}
5452
5453static int mvpp2_check_ringparam_valid(struct net_device *dev,
5454 struct ethtool_ringparam *ring)
5455{
5456 u16 new_rx_pending = ring->rx_pending;
5457 u16 new_tx_pending = ring->tx_pending;
5458
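	/* Rings must be non-empty; the HW wants RX sizes in multiples of 16 descriptors and TX in multiples of 32 */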
5459 if (ring->rx_pending == 0 || ring->tx_pending == 0)
5460 return -EINVAL;
5461
5462 if (ring->rx_pending > MVPP2_MAX_RXD)
5463 new_rx_pending = MVPP2_MAX_RXD;
5464 else if (!IS_ALIGNED(ring->rx_pending, 16))
5465 new_rx_pending = ALIGN(ring->rx_pending, 16);
5466
5467 if (ring->tx_pending > MVPP2_MAX_TXD)
5468 new_tx_pending = MVPP2_MAX_TXD;
5469 else if (!IS_ALIGNED(ring->tx_pending, 32))
5470 new_tx_pending = ALIGN(ring->tx_pending, 32);
5471
5472 if (ring->rx_pending != new_rx_pending) {
5473		netdev_info(dev, "illegal Rx ring size value %d, rounding to %d\n",
5474 ring->rx_pending, new_rx_pending);
5475 ring->rx_pending = new_rx_pending;
5476 }
5477
5478 if (ring->tx_pending != new_tx_pending) {
5479		netdev_info(dev, "illegal Tx ring size value %d, rounding to %d\n",
5480 ring->tx_pending, new_tx_pending);
5481 ring->tx_pending = new_tx_pending;
5482 }
5483
5484 return 0;
5485}
5486
5487static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
5488{
5489 u32 mac_addr_l, mac_addr_m, mac_addr_h;
5490
5491 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5492 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
5493 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
5494 addr[0] = (mac_addr_h >> 24) & 0xFF;
5495 addr[1] = (mac_addr_h >> 16) & 0xFF;
5496 addr[2] = (mac_addr_h >> 8) & 0xFF;
5497 addr[3] = mac_addr_h & 0xFF;
5498 addr[4] = mac_addr_m & 0xFF;
5499 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
5500}
5501
5502static int mvpp2_phy_connect(struct mvpp2_port *port)
5503{
5504 struct phy_device *phy_dev;
5505
5506 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
5507 port->phy_interface);
5508 if (!phy_dev) {
5509 netdev_err(port->dev, "cannot connect to phy\n");
5510 return -ENODEV;
5511 }
5512 phy_dev->supported &= PHY_GBIT_FEATURES;
5513 phy_dev->advertising = phy_dev->supported;
5514
5515 port->link = 0;
5516 port->duplex = 0;
5517 port->speed = 0;
5518
5519 return 0;
5520}
5521
5522static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5523{
5524 struct net_device *ndev = port->dev;
5525
5526 phy_disconnect(ndev->phydev);
5527}
5528
5529static int mvpp2_open(struct net_device *dev)
5530{
5531 struct mvpp2_port *port = netdev_priv(dev);
5532 unsigned char mac_bcast[ETH_ALEN] = {
5533 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5534 int err;
5535
5536 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
5537 if (err) {
5538 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
5539 return err;
5540 }
5541 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
5542 dev->dev_addr, true);
5543 if (err) {
5544		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
5545 return err;
5546 }
5547 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
5548 if (err) {
5549 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
5550 return err;
5551 }
5552 err = mvpp2_prs_def_flow(port);
5553 if (err) {
5554 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
5555 return err;
5556 }
5557
5558 /* Allocate the Rx/Tx queues */
5559 err = mvpp2_setup_rxqs(port);
5560 if (err) {
5561 netdev_err(port->dev, "cannot allocate Rx queues\n");
5562 return err;
5563 }
5564
5565 err = mvpp2_setup_txqs(port);
5566 if (err) {
5567 netdev_err(port->dev, "cannot allocate Tx queues\n");
5568 goto err_cleanup_rxqs;
5569 }
5570
5571 err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
5572 if (err) {
5573 netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
5574 goto err_cleanup_txqs;
5575 }
5576
5577	/* The link is down by default */
5578 netif_carrier_off(port->dev);
5579
5580 err = mvpp2_phy_connect(port);
5581 if (err < 0)
5582 goto err_free_irq;
5583
5584 /* Unmask interrupts on all CPUs */
5585 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
5586
5587 mvpp2_start_dev(port);
5588
5589 return 0;
5590
5591err_free_irq:
5592 free_irq(port->irq, port);
5593err_cleanup_txqs:
5594 mvpp2_cleanup_txqs(port);
5595err_cleanup_rxqs:
5596 mvpp2_cleanup_rxqs(port);
5597 return err;
5598}
5599
5600static int mvpp2_stop(struct net_device *dev)
5601{
5602 struct mvpp2_port *port = netdev_priv(dev);
5603 struct mvpp2_port_pcpu *port_pcpu;
5604 int cpu;
5605
5606 mvpp2_stop_dev(port);
5607 mvpp2_phy_disconnect(port);
5608
5609 /* Mask interrupts on all CPUs */
5610 on_each_cpu(mvpp2_interrupts_mask, port, 1);
5611
5612 free_irq(port->irq, port);
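	/* Quiesce the per-CPU TX-done hrtimers and tasklets */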
5613 for_each_present_cpu(cpu) {
5614 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
5615
5616 hrtimer_cancel(&port_pcpu->tx_done_timer);
5617 port_pcpu->timer_scheduled = false;
5618 tasklet_kill(&port_pcpu->tx_done_tasklet);
5619 }
5620 mvpp2_cleanup_rxqs(port);
5621 mvpp2_cleanup_txqs(port);
5622
5623 return 0;
5624}
5625
5626static void mvpp2_set_rx_mode(struct net_device *dev)
5627{
5628 struct mvpp2_port *port = netdev_priv(dev);
5629 struct mvpp2 *priv = port->priv;
5630 struct netdev_hw_addr *ha;
5631 int id = port->id;
5632 bool allmulti = dev->flags & IFF_ALLMULTI;
5633
5634 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
5635 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
5636 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
5637
5638	/* Remove all port->id's mcast entries */
5639 mvpp2_prs_mcast_del_all(priv, id);
5640
5641 if (allmulti && !netdev_mc_empty(dev)) {
5642 netdev_for_each_mc_addr(ha, dev)
5643 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
5644 }
5645}
5646
5647static int mvpp2_set_mac_address(struct net_device *dev, void *p)
5648{
5649 struct mvpp2_port *port = netdev_priv(dev);
5650 const struct sockaddr *addr = p;
5651 int err;
5652
5653 if (!is_valid_ether_addr(addr->sa_data)) {
5654 err = -EADDRNOTAVAIL;
5655 goto error;
5656 }
5657
5658 if (!netif_running(dev)) {
5659 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5660 if (!err)
5661 return 0;
5662 /* Reconfigure parser to accept the original MAC address */
5663 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5664 if (err)
5665 goto error;
5666 }
5667
5668 mvpp2_stop_dev(port);
5669
5670 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5671 if (!err)
5672 goto out_start;
5673
5674	/* Reconfigure the parser to accept the original MAC address */
5675 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5676 if (err)
5677 goto error;
5678out_start:
5679 mvpp2_start_dev(port);
5680 mvpp2_egress_enable(port);
5681 mvpp2_ingress_enable(port);
5682 return 0;
5683
5684error:
5685	netdev_err(dev, "failed to change MAC address\n");
5686 return err;
5687}
5688
5689static int mvpp2_change_mtu(struct net_device *dev, int mtu)
5690{
5691 struct mvpp2_port *port = netdev_priv(dev);
5692 int err;
5693
5694 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
5695		netdev_info(dev, "illegal MTU value %d, rounding to %d\n", mtu,
5696 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5697 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
5698 }
5699
5700 if (!netif_running(dev)) {
5701 err = mvpp2_bm_update_mtu(dev, mtu);
5702 if (!err) {
5703 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5704 return 0;
5705 }
5706
5707 /* Reconfigure BM to the original MTU */
5708 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5709 if (err)
5710 goto error;
5711 }
5712
5713 mvpp2_stop_dev(port);
5714
5715 err = mvpp2_bm_update_mtu(dev, mtu);
5716 if (!err) {
5717 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5718 goto out_start;
5719 }
5720
5721 /* Reconfigure BM to the original MTU */
5722 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5723 if (err)
5724 goto error;
5725
5726out_start:
5727 mvpp2_start_dev(port);
5728 mvpp2_egress_enable(port);
5729 mvpp2_ingress_enable(port);
5730
5731 return 0;
5732
5733error:
5734	netdev_err(dev, "failed to change MTU\n");
5735 return err;
5736}
5737
5738static void
5739mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5740{
5741 struct mvpp2_port *port = netdev_priv(dev);
5742 unsigned int start;
5743 int cpu;
5744
5745 for_each_possible_cpu(cpu) {
5746 struct mvpp2_pcpu_stats *cpu_stats;
5747 u64 rx_packets;
5748 u64 rx_bytes;
5749 u64 tx_packets;
5750 u64 tx_bytes;
5751
5752 cpu_stats = per_cpu_ptr(port->stats, cpu);
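		/* Loop until a consistent 64-bit snapshot is read; this matters on 32-bit CPUs */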
5753 do {
5754 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
5755 rx_packets = cpu_stats->rx_packets;
5756 rx_bytes = cpu_stats->rx_bytes;
5757 tx_packets = cpu_stats->tx_packets;
5758 tx_bytes = cpu_stats->tx_bytes;
5759 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
5760
5761 stats->rx_packets += rx_packets;
5762 stats->rx_bytes += rx_bytes;
5763 stats->tx_packets += tx_packets;
5764 stats->tx_bytes += tx_bytes;
5765 }
5766
5767 stats->rx_errors = dev->stats.rx_errors;
5768 stats->rx_dropped = dev->stats.rx_dropped;
5769 stats->tx_dropped = dev->stats.tx_dropped;
5770}
5771
5772static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5773{
5774 int ret;
5775
5776	if (!dev->phydev)
5777 return -ENOTSUPP;
5778
5779	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
5780 if (!ret)
5781 mvpp2_link_event(dev);
5782
5783 return ret;
5784}
5785
5786/* Ethtool methods */
5787
5788/* Set interrupt coalescing for ethtool */
5789static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5790 struct ethtool_coalesce *c)
5791{
5792 struct mvpp2_port *port = netdev_priv(dev);
5793 int queue;
5794
5795 for (queue = 0; queue < rxq_number; queue++) {
5796 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5797
5798 rxq->time_coal = c->rx_coalesce_usecs;
5799 rxq->pkts_coal = c->rx_max_coalesced_frames;
5800 mvpp2_rx_pkts_coal_set(port, rxq);
5801 mvpp2_rx_time_coal_set(port, rxq);
5802 }
5803
5804 for (queue = 0; queue < txq_number; queue++) {
5805 struct mvpp2_tx_queue *txq = port->txqs[queue];
5806
5807 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5808 }
5809
5810 return 0;
5811}
5812
5813/* Get interrupt coalescing for ethtool */
5814static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
5815 struct ethtool_coalesce *c)
5816{
5817 struct mvpp2_port *port = netdev_priv(dev);
5818
5819 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
5820 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5821 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5822 return 0;
5823}
5824
5825static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5826 struct ethtool_drvinfo *drvinfo)
5827{
5828 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5829 sizeof(drvinfo->driver));
5830 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5831 sizeof(drvinfo->version));
5832 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
5833 sizeof(drvinfo->bus_info));
5834}
5835
5836static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
5837 struct ethtool_ringparam *ring)
5838{
5839 struct mvpp2_port *port = netdev_priv(dev);
5840
5841 ring->rx_max_pending = MVPP2_MAX_RXD;
5842 ring->tx_max_pending = MVPP2_MAX_TXD;
5843 ring->rx_pending = port->rx_ring_size;
5844 ring->tx_pending = port->tx_ring_size;
5845}
5846
5847static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
5848 struct ethtool_ringparam *ring)
5849{
5850 struct mvpp2_port *port = netdev_priv(dev);
5851 u16 prev_rx_ring_size = port->rx_ring_size;
5852 u16 prev_tx_ring_size = port->tx_ring_size;
5853 int err;
5854
5855 err = mvpp2_check_ringparam_valid(dev, ring);
5856 if (err)
5857 return err;
5858
5859 if (!netif_running(dev)) {
5860 port->rx_ring_size = ring->rx_pending;
5861 port->tx_ring_size = ring->tx_pending;
5862 return 0;
5863 }
5864
5865 /* The interface is running, so we have to force a
5866 * reallocation of the queues
5867 */
5868 mvpp2_stop_dev(port);
5869 mvpp2_cleanup_rxqs(port);
5870 mvpp2_cleanup_txqs(port);
5871
5872 port->rx_ring_size = ring->rx_pending;
5873 port->tx_ring_size = ring->tx_pending;
5874
5875 err = mvpp2_setup_rxqs(port);
5876 if (err) {
5877 /* Reallocate Rx queues with the original ring size */
5878 port->rx_ring_size = prev_rx_ring_size;
5879 ring->rx_pending = prev_rx_ring_size;
5880 err = mvpp2_setup_rxqs(port);
5881 if (err)
5882 goto err_out;
5883 }
5884 err = mvpp2_setup_txqs(port);
5885 if (err) {
5886 /* Reallocate Tx queues with the original ring size */
5887 port->tx_ring_size = prev_tx_ring_size;
5888 ring->tx_pending = prev_tx_ring_size;
5889 err = mvpp2_setup_txqs(port);
5890 if (err)
5891 goto err_clean_rxqs;
5892 }
5893
5894 mvpp2_start_dev(port);
5895 mvpp2_egress_enable(port);
5896 mvpp2_ingress_enable(port);
5897
5898 return 0;
5899
5900err_clean_rxqs:
5901 mvpp2_cleanup_rxqs(port);
5902err_out:
5903	netdev_err(dev, "failed to change ring parameters\n");
5904 return err;
5905}
5906
5907/* Device ops */
5908
5909static const struct net_device_ops mvpp2_netdev_ops = {
5910 .ndo_open = mvpp2_open,
5911 .ndo_stop = mvpp2_stop,
5912 .ndo_start_xmit = mvpp2_tx,
5913 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5914 .ndo_set_mac_address = mvpp2_set_mac_address,
5915 .ndo_change_mtu = mvpp2_change_mtu,
5916 .ndo_get_stats64 = mvpp2_get_stats64,
5917	.ndo_do_ioctl		= mvpp2_ioctl,
5918};
5919
5920static const struct ethtool_ops mvpp2_eth_tool_ops = {
5921	.nway_reset	= phy_ethtool_nway_reset,
5922	.get_link	= ethtool_op_get_link,
5923 .set_coalesce = mvpp2_ethtool_set_coalesce,
5924 .get_coalesce = mvpp2_ethtool_get_coalesce,
5925 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5926 .get_ringparam = mvpp2_ethtool_get_ringparam,
5927 .set_ringparam = mvpp2_ethtool_set_ringparam,
5928 .get_link_ksettings = phy_ethtool_get_link_ksettings,
5929 .set_link_ksettings = phy_ethtool_set_link_ksettings,
5930};
5931
5932/* Driver initialization */
5933
5934static void mvpp2_port_power_up(struct mvpp2_port *port)
5935{
5936 mvpp2_port_mii_set(port);
5937 mvpp2_port_periodic_xon_disable(port);
5938	mvpp2_port_fc_adv_enable(port);
5939 mvpp2_port_reset(port);
5940}
5941
5942/* Initialize port HW */
5943static int mvpp2_port_init(struct mvpp2_port *port)
5944{
5945 struct device *dev = port->dev->dev.parent;
5946 struct mvpp2 *priv = port->priv;
5947 struct mvpp2_txq_pcpu *txq_pcpu;
5948 int queue, cpu, err;
5949
5950 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
5951 return -EINVAL;
5952
5953 /* Disable port */
5954 mvpp2_egress_disable(port);
5955 mvpp2_port_disable(port);
5956
5957 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
5958 GFP_KERNEL);
5959 if (!port->txqs)
5960 return -ENOMEM;
5961
5962	/* Associate physical Tx queues with this port and initialize them.
5963 * The mapping is predefined.
5964 */
5965 for (queue = 0; queue < txq_number; queue++) {
5966 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5967 struct mvpp2_tx_queue *txq;
5968
5969 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
5970 if (!txq) {
5971 err = -ENOMEM;
5972 goto err_free_percpu;
5973 }
5974
5975 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5976 if (!txq->pcpu) {
5977 err = -ENOMEM;
5978 goto err_free_percpu;
5979 }
5980
5981 txq->id = queue_phy_id;
5982 txq->log_id = queue;
5983 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5984 for_each_present_cpu(cpu) {
5985 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5986 txq_pcpu->cpu = cpu;
5987 }
5988
5989 port->txqs[queue] = txq;
5990 }
5991
5992 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
5993 GFP_KERNEL);
5994 if (!port->rxqs) {
5995 err = -ENOMEM;
5996 goto err_free_percpu;
5997 }
5998
5999	/* Allocate and initialize Rx queues for this port */
6000 for (queue = 0; queue < rxq_number; queue++) {
6001 struct mvpp2_rx_queue *rxq;
6002
6003 /* Map physical Rx queue to port's logical Rx queue */
6004 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
6005 if (!rxq) {
6006 err = -ENOMEM;
6007			goto err_free_percpu;
6008		}
6009 /* Map this Rx queue to a physical queue */
6010 rxq->id = port->first_rxq + queue;
6011 rxq->port = port->id;
6012 rxq->logic_rxq = queue;
6013
6014 port->rxqs[queue] = rxq;
6015 }
6016
6017 /* Configure Rx queue group interrupt for this port */
6018 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
6019
6020 /* Create Rx descriptor rings */
6021 for (queue = 0; queue < rxq_number; queue++) {
6022 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6023
6024 rxq->size = port->rx_ring_size;
6025 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6026 rxq->time_coal = MVPP2_RX_COAL_USEC;
6027 }
6028
6029 mvpp2_ingress_disable(port);
6030
6031 /* Port default configuration */
6032 mvpp2_defaults_set(port);
6033
6034 /* Port's classifier configuration */
6035 mvpp2_cls_oversize_rxq_set(port);
6036 mvpp2_cls_port_config(port);
6037
6038 /* Provide an initial Rx packet size */
6039 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6040
6041	/* Initialize BM pools for software forwarding (swf) */
6042 err = mvpp2_swf_bm_pool_init(port);
6043 if (err)
6044 goto err_free_percpu;
6045
6046 return 0;
6047
6048err_free_percpu:
6049 for (queue = 0; queue < txq_number; queue++) {
6050 if (!port->txqs[queue])
6051 continue;
6052 free_percpu(port->txqs[queue]->pcpu);
6053 }
6054 return err;
6055}
6056
6057/* Ports initialization */
6058static int mvpp2_port_probe(struct platform_device *pdev,
6059 struct device_node *port_node,
6060 struct mvpp2 *priv,
6061 int *next_first_rxq)
6062{
6063 struct device_node *phy_node;
6064 struct mvpp2_port *port;
6065	struct mvpp2_port_pcpu *port_pcpu;
6066 struct net_device *dev;
6067 struct resource *res;
6068 const char *dt_mac_addr;
6069 const char *mac_from;
6070 char hw_mac_addr[ETH_ALEN];
6071 u32 id;
6072 int features;
6073 int phy_mode;
6074 int priv_common_regs_num = 2;
6075	int err, i, cpu;
6076
6077 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6078 rxq_number);
6079 if (!dev)
6080 return -ENOMEM;
6081
6082 phy_node = of_parse_phandle(port_node, "phy", 0);
6083 if (!phy_node) {
6084 dev_err(&pdev->dev, "missing phy\n");
6085 err = -ENODEV;
6086 goto err_free_netdev;
6087 }
6088
6089 phy_mode = of_get_phy_mode(port_node);
6090 if (phy_mode < 0) {
6091 dev_err(&pdev->dev, "incorrect phy mode\n");
6092 err = phy_mode;
6093 goto err_free_netdev;
6094 }
6095
6096 if (of_property_read_u32(port_node, "port-id", &id)) {
6097 err = -EINVAL;
6098 dev_err(&pdev->dev, "missing port-id value\n");
6099 goto err_free_netdev;
6100 }
6101
6102 dev->tx_queue_len = MVPP2_MAX_TXD;
6103 dev->watchdog_timeo = 5 * HZ;
6104 dev->netdev_ops = &mvpp2_netdev_ops;
6105 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6106
6107 port = netdev_priv(dev);
6108
6109 port->irq = irq_of_parse_and_map(port_node, 0);
6110 if (port->irq <= 0) {
6111 err = -EINVAL;
6112 goto err_free_netdev;
6113 }
6114
6115 if (of_property_read_bool(port_node, "marvell,loopback"))
6116 port->flags |= MVPP2_F_LOOPBACK;
6117
6118 port->priv = priv;
6119 port->id = id;
6120 port->first_rxq = *next_first_rxq;
6121 port->phy_node = phy_node;
6122 port->phy_interface = phy_mode;
6123
6124 res = platform_get_resource(pdev, IORESOURCE_MEM,
6125 priv_common_regs_num + id);
6126 port->base = devm_ioremap_resource(&pdev->dev, res);
6127 if (IS_ERR(port->base)) {
6128 err = PTR_ERR(port->base);
6129 goto err_free_irq;
6130 }
6131
6132 /* Alloc per-cpu stats */
6133 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6134 if (!port->stats) {
6135 err = -ENOMEM;
6136 goto err_free_irq;
6137 }
6138
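	/* MAC selection: prefer the DT address, then the one latched in HW, else randomize */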
6139 dt_mac_addr = of_get_mac_address(port_node);
6140 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6141 mac_from = "device tree";
6142 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6143 } else {
6144 mvpp2_get_mac_address(port, hw_mac_addr);
6145 if (is_valid_ether_addr(hw_mac_addr)) {
6146 mac_from = "hardware";
6147 ether_addr_copy(dev->dev_addr, hw_mac_addr);
6148 } else {
6149 mac_from = "random";
6150 eth_hw_addr_random(dev);
6151 }
6152 }
6153
6154 port->tx_ring_size = MVPP2_MAX_TXD;
6155 port->rx_ring_size = MVPP2_MAX_RXD;
6156 port->dev = dev;
6157 SET_NETDEV_DEV(dev, &pdev->dev);
6158
6159 err = mvpp2_port_init(port);
6160 if (err < 0) {
6161 dev_err(&pdev->dev, "failed to init port %d\n", id);
6162 goto err_free_stats;
6163 }
6164 mvpp2_port_power_up(port);
6165
6166 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6167 if (!port->pcpu) {
6168 err = -ENOMEM;
6169 goto err_free_txq_pcpu;
6170 }
6171
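	/* Per-CPU deferred TX completion: a pinned hrtimer kicks a tasklet that reclaims sent buffers */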
6172 for_each_present_cpu(cpu) {
6173 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6174
6175 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6176 HRTIMER_MODE_REL_PINNED);
6177 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6178 port_pcpu->timer_scheduled = false;
6179
6180 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6181 (unsigned long)dev);
6182 }
6183
6184 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6185 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6186 dev->features = features | NETIF_F_RXCSUM;
6187 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6188 dev->vlan_features |= features;
6189
6190 /* MTU range: 68 - 9676 */
6191 dev->min_mtu = ETH_MIN_MTU;
6192 /* 9676 == 9700 - 20 and rounding to 8 */
6193 dev->max_mtu = 9676;
6194
6195 err = register_netdev(dev);
6196 if (err < 0) {
6197 dev_err(&pdev->dev, "failed to register netdev\n");
6198		goto err_free_port_pcpu;
6199 }
6200 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6201
6202 /* Increment the first Rx queue number to be used by the next port */
6203 *next_first_rxq += rxq_number;
6204 priv->port_list[id] = port;
6205 return 0;
6206
6207err_free_port_pcpu:
6208 free_percpu(port->pcpu);
6209err_free_txq_pcpu:
6210 for (i = 0; i < txq_number; i++)
6211 free_percpu(port->txqs[i]->pcpu);
6212err_free_stats:
6213 free_percpu(port->stats);
6214err_free_irq:
6215 irq_dispose_mapping(port->irq);
6216err_free_netdev:
6217	of_node_put(phy_node);
6218 free_netdev(dev);
6219 return err;
6220}
6221
6222/* Ports removal routine */
6223static void mvpp2_port_remove(struct mvpp2_port *port)
6224{
6225 int i;
6226
6227 unregister_netdev(port->dev);
6228	of_node_put(port->phy_node);
6229	free_percpu(port->pcpu);
6230 free_percpu(port->stats);
6231 for (i = 0; i < txq_number; i++)
6232 free_percpu(port->txqs[i]->pcpu);
6233 irq_dispose_mapping(port->irq);
6234 free_netdev(port->dev);
6235}
6236
6237/* Initialize decoding windows */
6238static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6239 struct mvpp2 *priv)
6240{
6241 u32 win_enable;
6242 int i;
6243
6244 for (i = 0; i < 6; i++) {
6245 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6246 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6247
6248 if (i < 4)
6249 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6250 }
6251
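	/* Open one decode window per DRAM chip select and record its enable bit */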
6252 win_enable = 0;
6253
6254 for (i = 0; i < dram->num_cs; i++) {
6255 const struct mbus_dram_window *cs = dram->cs + i;
6256
6257 mvpp2_write(priv, MVPP2_WIN_BASE(i),
6258 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6259 dram->mbus_dram_target_id);
6260
6261 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6262 (cs->size - 1) & 0xffff0000);
6263
6264 win_enable |= (1 << i);
6265 }
6266
6267 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
6268}
6269
6270/* Initialize Rx FIFOs */
6271static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6272{
6273 int port;
6274
6275 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6276 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6277 MVPP2_RX_FIFO_PORT_DATA_SIZE);
6278 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6279 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
6280 }
6281
6282 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6283 MVPP2_RX_FIFO_PORT_MIN_PKT);
6284 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6285}
6286
6287/* Initialize network controller common part HW */
6288static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6289{
6290 const struct mbus_dram_target_info *dram_target_info;
6291 int err, i;
6292	u32 val;
6293
6294 /* Checks for hardware constraints */
6295 if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
6296 (txq_number > MVPP2_MAX_TXQ)) {
6297 dev_err(&pdev->dev, "invalid queue size parameter\n");
6298 return -EINVAL;
6299 }
6300
6301 /* MBUS windows configuration */
6302 dram_target_info = mv_mbus_dram_info();
6303 if (dram_target_info)
6304 mvpp2_conf_mbus_windows(dram_target_info, priv);
6305
6306 /* Disable HW PHY polling */
6307 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6308 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
6309 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6310
6311 /* Allocate and initialize aggregated TXQs */
6312 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
6313 sizeof(struct mvpp2_tx_queue),
6314 GFP_KERNEL);
6315 if (!priv->aggr_txqs)
6316 return -ENOMEM;
6317
6318 for_each_present_cpu(i) {
6319 priv->aggr_txqs[i].id = i;
6320 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
6321 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
6322 MVPP2_AGGR_TXQ_SIZE, i, priv);
6323 if (err < 0)
6324 return err;
6325 }
6326
6327 /* Rx Fifo Init */
6328 mvpp2_rx_fifo_init(priv);
6329
6330 /* Reset Rx queue group interrupt configuration */
6331 for (i = 0; i < MVPP2_MAX_PORTS; i++)
6332 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
6333
6334 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6335 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
6336
6337	/* Allow cache snooping when transmitting packets */
6338 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
6339
6340 /* Buffer Manager initialization */
6341 err = mvpp2_bm_init(pdev, priv);
6342 if (err < 0)
6343 return err;
6344
6345 /* Parser default initialization */
6346 err = mvpp2_prs_default_init(pdev, priv);
6347 if (err < 0)
6348 return err;
6349
6350 /* Classifier default initialization */
6351 mvpp2_cls_init(priv);
6352
6353 return 0;
6354}
6355
6356static int mvpp2_probe(struct platform_device *pdev)
6357{
6358 struct device_node *dn = pdev->dev.of_node;
6359 struct device_node *port_node;
6360 struct mvpp2 *priv;
6361 struct resource *res;
6362 int port_count, first_rxq;
6363 int err;
6364
6365 priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
6366 if (!priv)
6367 return -ENOMEM;
6368
6369 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6370 priv->base = devm_ioremap_resource(&pdev->dev, res);
6371 if (IS_ERR(priv->base))
6372 return PTR_ERR(priv->base);
6373
6374 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6375 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
6376 if (IS_ERR(priv->lms_base))
6377 return PTR_ERR(priv->lms_base);
6378
6379 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
6380 if (IS_ERR(priv->pp_clk))
6381 return PTR_ERR(priv->pp_clk);
6382 err = clk_prepare_enable(priv->pp_clk);
6383 if (err < 0)
6384 return err;
6385
6386 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
6387 if (IS_ERR(priv->gop_clk)) {
6388 err = PTR_ERR(priv->gop_clk);
6389 goto err_pp_clk;
6390 }
6391 err = clk_prepare_enable(priv->gop_clk);
6392 if (err < 0)
6393 goto err_pp_clk;
6394
6395 /* Get system's tclk rate */
6396 priv->tclk = clk_get_rate(priv->pp_clk);
6397
6398 /* Initialize network controller */
6399 err = mvpp2_init(pdev, priv);
6400 if (err < 0) {
6401 dev_err(&pdev->dev, "failed to initialize controller\n");
6402 goto err_gop_clk;
6403 }
6404
6405 port_count = of_get_available_child_count(dn);
6406 if (port_count == 0) {
6407 dev_err(&pdev->dev, "no ports enabled\n");
6408		err = -ENODEV;
6409 goto err_gop_clk;
6410 }
6411
6412 priv->port_list = devm_kcalloc(&pdev->dev, port_count,
6413 sizeof(struct mvpp2_port *),
6414 GFP_KERNEL);
6415 if (!priv->port_list) {
6416 err = -ENOMEM;
6417 goto err_gop_clk;
6418 }
6419
6420 /* Initialize ports */
6421 first_rxq = 0;
6422 for_each_available_child_of_node(dn, port_node) {
6423 err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
6424 if (err < 0)
6425 goto err_gop_clk;
6426 }
6427
6428 platform_set_drvdata(pdev, priv);
6429 return 0;
6430
6431err_gop_clk:
6432 clk_disable_unprepare(priv->gop_clk);
6433err_pp_clk:
6434 clk_disable_unprepare(priv->pp_clk);
6435 return err;
6436}
6437
6438static int mvpp2_remove(struct platform_device *pdev)
6439{
6440 struct mvpp2 *priv = platform_get_drvdata(pdev);
6441 struct device_node *dn = pdev->dev.of_node;
6442 struct device_node *port_node;
6443 int i = 0;
6444
6445 for_each_available_child_of_node(dn, port_node) {
6446 if (priv->port_list[i])
6447 mvpp2_port_remove(priv->port_list[i]);
6448 i++;
6449 }
6450
6451 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
6452 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
6453
6454 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
6455 }
6456
6457 for_each_present_cpu(i) {
6458 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
6459
6460 dma_free_coherent(&pdev->dev,
6461 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
6462 aggr_txq->descs,
6463 aggr_txq->descs_phys);
6464 }
6465
6466 clk_disable_unprepare(priv->pp_clk);
6467 clk_disable_unprepare(priv->gop_clk);
6468
6469 return 0;
6470}
6471
6472static const struct of_device_id mvpp2_match[] = {
6473 { .compatible = "marvell,armada-375-pp2" },
6474 { }
6475};
6476MODULE_DEVICE_TABLE(of, mvpp2_match);
6477
6478static struct platform_driver mvpp2_driver = {
6479 .probe = mvpp2_probe,
6480 .remove = mvpp2_remove,
6481 .driver = {
6482 .name = MVPP2_DRIVER_NAME,
6483 .of_match_table = mvpp2_match,
6484 },
6485};
6486
6487module_platform_driver(mvpp2_driver);
6488
6489MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
6490MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
6491MODULE_LICENSE("GPL v2");