/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD		0xfffff0
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7
#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP22_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP22_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_CTRL_4_REG			0x90
#define MVPP22_CTRL4_EXT_PIN_GMII_SEL		BIT(0)
#define MVPP22_CTRL4_DP_CLK_SEL			BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS		BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE	BIT(7)

/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
#define MVPP22_XLG_CTRL3_REG			0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)

/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG			0x1204
#define MVPP22_SMI_POLLING_EN			BIT(10)

#define MVPP22_GMAC_BASE(port)		(0x7000 + (port) * 0x1000 + 0xe00)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
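/* A worked example of the wrap-around above: with a 128-descriptor
 * ring (last_desc = 127), MVPP2_QUEUE_NEXT_DESC() advances index 42
 * to 43, while index 127 wraps back to 0.
 */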

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
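/* A worked example of that alignment: with the 2-byte MH in front,
 * the 14-byte Ethernet header (6 + 6 + 2) ends at offset 16 into the
 * buffer, so the IP header starts on a 4-byte boundary.
 */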
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		4

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
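/* A worked example of the sizing above (assuming a 64-byte cache line
 * and NET_SKB_PAD of 64): for an MTU of 1500, MVPP2_RX_PKT_SIZE()
 * aligns 1500 + 2 + 4 + 14 + 4 = 1524 up to 1536, MVPP2_RX_BUF_SIZE()
 * adds the 64-byte headroom for a 1600-byte buffer, and
 * MVPP2_RX_TOTAL_SIZE() adds the skb_shared_info footprint on top.
 */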
443 | ||
444 | #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) | |
445 | ||
446 | /* IPv6 max L3 address size */ | |
447 | #define MVPP2_MAX_L3_ADDR_SIZE 16 | |
448 | ||
449 | /* Port flags */ | |
450 | #define MVPP2_F_LOOPBACK BIT(0) | |
451 | ||
452 | /* Marvell tag types */ | |
453 | enum mvpp2_tag_type { | |
454 | MVPP2_TAG_TYPE_NONE = 0, | |
455 | MVPP2_TAG_TYPE_MH = 1, | |
456 | MVPP2_TAG_TYPE_DSA = 2, | |
457 | MVPP2_TAG_TYPE_EDSA = 3, | |
458 | MVPP2_TAG_TYPE_VLAN = 4, | |
459 | MVPP2_TAG_TYPE_LAST = 5 | |
460 | }; | |
461 | ||
462 | /* Parser constants */ | |
463 | #define MVPP2_PRS_TCAM_SRAM_SIZE 256 | |
464 | #define MVPP2_PRS_TCAM_WORDS 6 | |
465 | #define MVPP2_PRS_SRAM_WORDS 4 | |
466 | #define MVPP2_PRS_FLOW_ID_SIZE 64 | |
467 | #define MVPP2_PRS_FLOW_ID_MASK 0x3f | |
468 | #define MVPP2_PRS_TCAM_ENTRY_INVALID 1 | |
469 | #define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) | |
470 | #define MVPP2_PRS_IPV4_HEAD 0x40 | |
471 | #define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 | |
472 | #define MVPP2_PRS_IPV4_MC 0xe0 | |
473 | #define MVPP2_PRS_IPV4_MC_MASK 0xf0 | |
474 | #define MVPP2_PRS_IPV4_BC_MASK 0xff | |
475 | #define MVPP2_PRS_IPV4_IHL 0x5 | |
476 | #define MVPP2_PRS_IPV4_IHL_MASK 0xf | |
477 | #define MVPP2_PRS_IPV6_MC 0xff | |
478 | #define MVPP2_PRS_IPV6_MC_MASK 0xff | |
479 | #define MVPP2_PRS_IPV6_HOP_MASK 0xff | |
480 | #define MVPP2_PRS_TCAM_PROTO_MASK 0xff | |
481 | #define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f | |
482 | #define MVPP2_PRS_DBL_VLANS_MAX 100 | |
483 | ||
484 | /* Tcam structure: | |
485 | * - lookup ID - 4 bits | |
486 | * - port ID - 1 byte | |
487 | * - additional information - 1 byte | |
488 | * - header data - 8 bytes | |
489 | * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). | |
490 | */ | |
491 | #define MVPP2_PRS_AI_BITS 8 | |
492 | #define MVPP2_PRS_PORT_MASK 0xff | |
493 | #define MVPP2_PRS_LU_MASK 0xf | |
494 | #define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ | |
495 | (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) | |
496 | #define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ | |
497 | (((offs) * 2) - ((offs) % 2) + 2) | |
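/* A worked example of the byte swizzle above: data bytes are packed
 * two per 32-bit TCAM word, in bytes 0-1, with their enable bits in
 * bytes 2-3 of the same word. So header-data offsets 0, 1, 2, 3 map
 * to bytes 0, 1, 4, 5 and their enable bits to bytes 2, 3, 6, 7.
 */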
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		8
#define MVPP2_BM_LONG_BUF_NUM		1024
#define MVPP2_BM_SHORT_BUF_NUM		2048
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL		3

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24
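/* For example, a buffer from pool 3 handled by CPU 1 would carry the
 * cookie (1 << MVPP2_BM_COOKIE_CPU_OFFS) | (3 << MVPP2_BM_COOKIE_POOL_OFFS),
 * i.e. 0x01000300, assuming the remaining cookie bits stay zero.
 */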

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
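/* A rough sketch of the arithmetic (assuming NET_SKB_PAD of 64 and an
 * skb_shared_info footprint of 320 bytes): 512 - 64 - 320 leaves
 * roughly 128 bytes of packet data per short-pool buffer; the exact
 * value is architecture-dependent.
 */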

#define MVPP21_ADDR_SPACE_SZ		0
#define MVPP22_ADDR_SPACE_SZ		SZ_64K

#define MVPP2_MAX_CPUS			4

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *lms_base;
	void __iomem *iface_base;

	/* On PPv2.2, each CPU can access the base register through a
	 * separate address space, each 64 KB apart from each
	 * other.
	 */
	void __iomem *cpu_base[MVPP2_MAX_CPUS];

	/* Common clocks */
	struct clk *pp_clk;
	struct clk *gop_clk;
	struct clk *mg_clk;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;
};

struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	int pkt_size;

	u32 pending_cause_rx;
	struct napi_struct napi;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t dma;

	/* Size transmitted */
	size_t size;
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* BPPE size in bytes */
	int size_bytes;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;
	int frag_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_MAX_TXQ;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->cpu_base[0] + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->cpu_base[0] + offset);
}

/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP22_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG	(related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG	(related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG	(related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG	(related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG	(related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG	(related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG	(related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG	(related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG	(related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG	(related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG	(related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			       u32 offset, u32 data)
{
	writel(data, priv->cpu_base[cpu] + offset);
}

static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
			     u32 offset)
{
	return readl(priv->cpu_base[cpu] + offset);
}
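/* An illustrative indirect-access sequence (a sketch; the queue
 * selection and the dependent read must go through the same CPU
 * window):
 *
 *	mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
 *	pending = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_PENDING_REG) &
 *		  MVPP2_TXQ_PENDING_MASK;
 */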

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.buf_dma_addr;
	else
		return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.data_size;
	else
		return tx_desc->pp22.data_size;
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
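/* For example, with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8, TXQ 2
 * of Ethernet port 1 maps to physical TXQ (16 + 1) * 8 + 2 = 138.
 */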

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
1398 | ||
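Note the negative logic: mvpp2_prs_tcam_port_map_set() zeroes the data byte and stores the complement of the port map in the enable byte, so ports inside the map become don't-care bits that always match, while excluded ports are forced to mismatch. A standalone sketch of the complement round-trip, assuming an illustrative 7-bit PORT_MASK and the usual reading of TCAM enable bits as per-bit match requirements:

#include <stdint.h>
#include <stdio.h>

#define PORT_MASK 0x7f	/* illustrative stand-in for MVPP2_PRS_PORT_MASK */

int main(void)
{
	unsigned int ports = 0x03;		/* ports 0 and 1 should match */
	uint8_t enable = ~ports & PORT_MASK;	/* what the set helper stores */

	/* Recovering the map inverts again, as in
	 * mvpp2_prs_tcam_port_map_get().
	 */
	printf("stored enable byte: 0x%02x\n", enable);		      /* 0x7c */
	printf("recovered port map: 0x%02x\n", ~enable & PORT_MASK);  /* 0x03 */
	return 0;
}
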
1399 | /* Set byte of data and its enable bits in tcam sw entry */ | |
1400 | static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, | |
1401 | unsigned int offs, unsigned char byte, | |
1402 | unsigned char enable) | |
1403 | { | |
1404 | pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; | |
1405 | pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; | |
1406 | } | |
1407 | ||
1408 | /* Get byte of data and its enable bits from tcam sw entry */ | |
1409 | static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, | |
1410 | unsigned int offs, unsigned char *byte, | |
1411 | unsigned char *enable) | |
1412 | { | |
1413 | *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; | |
1414 | *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; | |
1415 | } | |
1416 | ||
1417 | /* Compare tcam data bytes with a pattern */ | |
1418 | static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, | |
1419 | u16 data) | |
1420 | { | |
1421 | int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); | |
1422 | u16 tcam_data; | |
1423 | ||
1424 | tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off]; | |
1425 | if (tcam_data != data) | |
1426 | return false; | |
1427 | return true; | |
1428 | } | |
1429 | ||
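Because the TCAM data bytes mirror the packet, which carries ethertypes big-endian, rebuilding a host value as (byte[off + 1] << 8) | byte[off] yields the byte-swapped form; that is why the vlan lookups further down compare against swab16(tpid). A small sketch (the local swab16() stands in for the kernel helper):

#include <stdint.h>
#include <stdio.h>

static uint16_t swab16(uint16_t x)	/* stands in for the kernel helper */
{
	return (uint16_t)((x >> 8) | (x << 8));
}

int main(void)
{
	/* 0x8100 sits big-endian in the packet: byte[0] = 0x81,
	 * byte[1] = 0x00, and the TCAM data bytes mirror the packet.
	 */
	uint8_t byte0 = 0x81, byte1 = 0x00;
	uint16_t tcam_data = (uint16_t)((byte1 << 8) | byte0);

	printf("tcam_data = 0x%04x, swab16(0x8100) = 0x%04x\n",
	       tcam_data, swab16(0x8100));	/* both print 0x0081 */
	return 0;
}
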
1430 | /* Update ai bits in tcam sw entry */ | |
1431 | static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, | |
1432 | unsigned int bits, unsigned int enable) | |
1433 | { | |
1434 | int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE; | |
1435 | ||
1436 | for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { | |
1437 | ||
1438 | if (!(enable & BIT(i))) | |
1439 | continue; | |
1440 | ||
1441 | if (bits & BIT(i)) | |
1442 | pe->tcam.byte[ai_idx] |= 1 << i; | |
1443 | else | |
1444 | pe->tcam.byte[ai_idx] &= ~(1 << i); | |
1445 | } | |
1446 | ||
1447 | pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable; | |
1448 | } | |
1449 | ||
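The loop above is an unrolled read-modify-write under a mask: only bits whose enable bit is set are copied, everything else is preserved, and the enable byte accumulates which bits have been constrained. The same idiom condensed into one expression (the helper name is ours):

#include <stdint.h>
#include <stdio.h>

/* Copy only the bits selected by 'enable', preserving the rest - a
 * condensed form of the ai/ri update loops in this file.
 */
static uint8_t update_bits(uint8_t old, uint8_t bits, uint8_t enable)
{
	return (uint8_t)((old & ~enable) | (bits & enable));
}

int main(void)
{
	uint8_t ai = 0xf0;

	ai = update_bits(ai, 0x05, 0x0f);	/* touch the low nibble only */
	printf("ai = 0x%02x\n", ai);		/* 0xf5 */
	return 0;
}
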
1450 | /* Get ai bits from tcam sw entry */ | |
1451 | static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) | |
1452 | { | |
1453 | return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE]; | |
1454 | } | |
1455 | ||
1456 | /* Set ethertype in tcam sw entry */ | |
1457 | static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, | |
1458 | unsigned short ethertype) | |
1459 | { | |
1460 | mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff); | |
1461 | mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); | |
1462 | } | |
1463 | ||
1464 | /* Set bits in sram sw entry */ | |
1465 | static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, | |
1466 | int val) | |
1467 | { | |
1468 | pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); | |
1469 | } | |
1470 | ||
1471 | /* Clear bits in sram sw entry */ | |
1472 | static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, | |
1473 | int val) | |
1474 | { | |
1475 | pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); | |
1476 | } | |
1477 | ||
1478 | /* Update ri bits in sram sw entry */ | |
1479 | static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, | |
1480 | unsigned int bits, unsigned int mask) | |
1481 | { | |
1482 | unsigned int i; | |
1483 | ||
1484 | for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { | |
1485 | int ri_off = MVPP2_PRS_SRAM_RI_OFFS; | |
1486 | ||
1487 | if (!(mask & BIT(i))) | |
1488 | continue; | |
1489 | ||
1490 | if (bits & BIT(i)) | |
1491 | mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); | |
1492 | else | |
1493 | mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); | |
1494 | ||
1495 | mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); | |
1496 | } | |
1497 | } | |
1498 | ||
1499 | /* Obtain ri bits from sram sw entry */ | |
1500 | static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) | |
1501 | { | |
1502 | return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD]; | |
1503 | } | |
1504 | ||
1505 | /* Update ai bits in sram sw entry */ | |
1506 | static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, | |
1507 | unsigned int bits, unsigned int mask) | |
1508 | { | |
1509 | unsigned int i; | |
1510 | int ai_off = MVPP2_PRS_SRAM_AI_OFFS; | |
1511 | ||
1512 | for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { | |
1513 | ||
1514 | if (!(mask & BIT(i))) | |
1515 | continue; | |
1516 | ||
1517 | if (bits & BIT(i)) | |
1518 | mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); | |
1519 | else | |
1520 | mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); | |
1521 | ||
1522 | mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); | |
1523 | } | |
1524 | } | |
1525 | ||
1526 | /* Read ai bits from sram sw entry */ | |
1527 | static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) | |
1528 | { | |
1529 | u8 bits; | |
1530 | int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); | |
1531 | int ai_en_off = ai_off + 1; | |
1532 | int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; | |
1533 | ||
1534 | bits = (pe->sram.byte[ai_off] >> ai_shift) | | |
1535 | (pe->sram.byte[ai_en_off] << (8 - ai_shift)); | |
1536 | ||
1537 | return bits; | |
1538 | } | |
1539 | ||
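The AI field does not have to start on a byte boundary, so reading it back stitches the high bits of one SRAM byte to the low bits of the next. A standalone sketch of that reassembly, with an arbitrary illustrative offset:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t sram[2] = { 0xa5, 0x3c };	/* two adjacent SRAM bytes */
	int off = 6;				/* field starts at bit 6 of byte 0 */
	int shift = off % 8;
	uint8_t bits;

	/* Low bits come from the first byte, the rest spill in from the
	 * next one - the same shape as mvpp2_prs_sram_ai_get().
	 */
	bits = (sram[0] >> shift) | (uint8_t)(sram[1] << (8 - shift));

	printf("reassembled field: 0x%02x\n", bits);	/* 0xf2 */
	return 0;
}
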
1540 | /* In sram sw entry set lookup ID field of the tcam key to be used in the next | |
1541 | * lookup iteration | |
1542 | */ | |
1543 | static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, | |
1544 | unsigned int lu) | |
1545 | { | |
1546 | int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS; | |
1547 | ||
1548 | mvpp2_prs_sram_bits_clear(pe, sram_next_off, | |
1549 | MVPP2_PRS_SRAM_NEXT_LU_MASK); | |
1550 | mvpp2_prs_sram_bits_set(pe, sram_next_off, lu); | |
1551 | } | |
1552 | ||
1553 | /* In the sram sw entry set sign and value of the next lookup offset | |
1554 | * and the offset value generated to the classifier | |
1555 | */ | |
1556 | static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, | |
1557 | unsigned int op) | |
1558 | { | |
1559 | /* Set sign */ | |
1560 | if (shift < 0) { | |
1561 | mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); | |
1562 | shift = 0 - shift; | |
1563 | } else { | |
1564 | mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); | |
1565 | } | |
1566 | ||
1567 | /* Set value */ | |
1568 | pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = | |
1569 | (unsigned char)shift; | |
1570 | ||
1571 | /* Reset and set operation */ | |
1572 | mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, | |
1573 | MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); | |
1574 | mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op); | |
1575 | ||
1576 | /* Set base offset as current */ | |
1577 | mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); | |
1578 | } | |
1579 | ||
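Shifts are stored sign-magnitude rather than two's complement: one sign bit plus an unsigned magnitude byte. A sketch of the encode/decode pair under that assumption (the struct layout is illustrative):

#include <stdio.h>
#include <stdlib.h>

struct shift_field {
	unsigned char sign;	/* models MVPP2_PRS_SRAM_SHIFT_SIGN_BIT */
	unsigned char mag;	/* models the unsigned shift byte */
};

static struct shift_field encode(int shift)
{
	struct shift_field f = { .sign = shift < 0,
				 .mag = (unsigned char)abs(shift) };
	return f;
}

static int decode(struct shift_field f)
{
	return f.sign ? -(int)f.mag : f.mag;
}

int main(void)
{
	/* -18 is the shift mvpp2_prs_ip6_cast() uses to step back to the
	 * IPv6 next-header field.
	 */
	struct shift_field f = encode(-18);

	printf("sign=%u mag=%u decoded=%d\n", f.sign, f.mag, decode(f));
	return 0;
}
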
1580 | /* In the sram sw entry set sign and value of the user defined offset | |
1581 | * generated to the classifier | |
1582 | */ | |
1583 | static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, | |
1584 | unsigned int type, int offset, | |
1585 | unsigned int op) | |
1586 | { | |
1587 | /* Set sign */ | |
1588 | if (offset < 0) { | |
1589 | mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); | |
1590 | offset = 0 - offset; | |
1591 | } else { | |
1592 | mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); | |
1593 | } | |
1594 | ||
1595 | /* Set value */ | |
1596 | mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, | |
1597 | MVPP2_PRS_SRAM_UDF_MASK); | |
1598 | mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); | |
1599 | pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + | |
1600 | MVPP2_PRS_SRAM_UDF_BITS)] &= | |
1601 | ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); | |
1602 | pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + | |
1603 | MVPP2_PRS_SRAM_UDF_BITS)] |= | |
1604 | (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); | |
1605 | ||
1606 | /* Set offset type */ | |
1607 | mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, | |
1608 | MVPP2_PRS_SRAM_UDF_TYPE_MASK); | |
1609 | mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); | |
1610 | ||
1611 | /* Set offset operation */ | |
1612 | mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, | |
1613 | MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); | |
1614 | mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); | |
1615 | ||
1616 | pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + | |
1617 | MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= | |
1618 | ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> | |
1619 | (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); | |
1620 | ||
1621 | pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + | |
1622 | MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= | |
1623 | (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); | |
1624 | ||
1625 | /* Set base offset as current */ | |
1626 | mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); | |
1627 | } | |
1628 | ||
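Writing the UDF offset is the awkward mirror image of the AI read above: a field that straddles a byte boundary is split, with the low bits masked into one byte and the spill-over into the next. A standalone model of the split (offset and width are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t sram[2] = { 0xff, 0xff };
	int off = 6;			/* illustrative bit offset */
	uint8_t mask = 0xff;		/* illustrative 8-bit field mask */
	uint8_t val = 0x2a;

	/* Low bits of the field land in byte 0, the spill-over in byte 1,
	 * each under its slice of the mask - as in the UDF writes above.
	 */
	sram[0] &= ~(uint8_t)(mask << (off % 8));
	sram[0] |= (uint8_t)(val << (off % 8));
	sram[1] &= ~(uint8_t)(mask >> (8 - (off % 8)));
	sram[1] |= (uint8_t)(val >> (8 - (off % 8)));

	printf("bytes: 0x%02x 0x%02x\n", sram[0], sram[1]);	/* 0xbf 0xca */
	return 0;
}
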
1629 | /* Find parser flow entry */ | |
1630 | static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) | |
1631 | { | |
1632 | struct mvpp2_prs_entry *pe; | |
1633 | int tid; | |
1634 | ||
1635 | pe = kzalloc(sizeof(*pe), GFP_KERNEL); | |
1636 | if (!pe) | |
1637 | return NULL; | |
1638 | mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); | |
1639 | ||
1640 | /* Go through all entries with MVPP2_PRS_LU_FLOWS */ | |
1641 | for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { | |
1642 | u8 bits; | |
1643 | ||
1644 | if (!priv->prs_shadow[tid].valid || | |
1645 | priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) | |
1646 | continue; | |
1647 | ||
1648 | pe->index = tid; | |
1649 | mvpp2_prs_hw_read(priv, pe); | |
1650 | bits = mvpp2_prs_sram_ai_get(pe); | |
1651 | ||
1652 | /* Sram stores the classification lookup ID in AI bits [5:0] */ | |
1653 | if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) | |
1654 | return pe; | |
1655 | } | |
1656 | kfree(pe); | |
1657 | ||
1658 | return NULL; | |
1659 | } | |
1660 | ||
1661 | /* Return first free tcam index, seeking from start to end */ | |
1662 | static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, | |
1663 | unsigned char end) | |
1664 | { | |
1665 | int tid; | |
1666 | ||
1667 | if (start > end) | |
1668 | swap(start, end); | |
1669 | ||
1670 | if (end >= MVPP2_PRS_TCAM_SRAM_SIZE) | |
1671 | end = MVPP2_PRS_TCAM_SRAM_SIZE - 1; | |
1672 | ||
1673 | for (tid = start; tid <= end; tid++) { | |
1674 | if (!priv->prs_shadow[tid].valid) | |
1675 | return tid; | |
1676 | } | |
1677 | ||
1678 | return -EINVAL; | |
1679 | } | |
1680 | ||
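This is a linear scan of the shadow table, made direction-agnostic by the initial swap, so callers may pass the bounds in either order. Note that mvpp2_prs_vlan_add() below passes (last, first) yet still receives the lowest free index; the required ordering between single- and double-vlan entries is enforced separately by its tid/tid_aux comparison. A compact standalone model:

#include <stdbool.h>
#include <stdio.h>

#define SIZE 16	/* illustrative stand-in for MVPP2_PRS_TCAM_SRAM_SIZE */

static bool valid[SIZE];

static int first_free(unsigned char start, unsigned char end)
{
	int tid;

	if (start > end) {
		unsigned char tmp = start;
		start = end;
		end = tmp;
	}
	if (end >= SIZE)
		end = SIZE - 1;

	for (tid = start; tid <= end; tid++)
		if (!valid[tid])
			return tid;
	return -1;
}

int main(void)
{
	valid[0] = valid[1] = true;
	printf("first free, [0,15]: %d\n", first_free(0, 15));	/* 2 */
	printf("first free, [15,0]: %d\n", first_free(15, 0));	/* also 2 */
	return 0;
}
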
1681 | /* Enable/disable dropping all mac da's */ | |
1682 | static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) | |
1683 | { | |
1684 | struct mvpp2_prs_entry pe; | |
1685 | ||
1686 | if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { | |
1687 | /* Entry exist - update port only */ | |
1688 | pe.index = MVPP2_PE_DROP_ALL; | |
1689 | mvpp2_prs_hw_read(priv, &pe); | |
1690 | } else { | |
1691 | /* Entry doesn't exist - create new */ | |
1692 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
1693 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); | |
1694 | pe.index = MVPP2_PE_DROP_ALL; | |
1695 | ||
1696 | /* Non-promiscuous mode for all ports - DROP unknown packets */ | |
1697 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, | |
1698 | MVPP2_PRS_RI_DROP_MASK); | |
1699 | ||
1700 | mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); | |
1701 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); | |
1702 | ||
1703 | /* Update shadow table */ | |
1704 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); | |
1705 | ||
1706 | /* Mask all ports */ | |
1707 | mvpp2_prs_tcam_port_map_set(&pe, 0); | |
1708 | } | |
1709 | ||
1710 | /* Update port mask */ | |
1711 | mvpp2_prs_tcam_port_set(&pe, port, add); | |
1712 | ||
1713 | mvpp2_prs_hw_write(priv, &pe); | |
1714 | } | |
1715 | ||
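mvpp2_prs_mac_drop_all_set() is the first of several helpers sharing a create-or-update pattern: a fixed-index entry is built lazily on first use with all ports masked, after which only the per-port enable bit is toggled. A reduced sketch of that pattern (types and names are ours):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct entry { bool valid; unsigned char port_en; };

static struct entry shadow[4];

static void port_set(int index, int port, bool add)
{
	struct entry *e = &shadow[index];

	if (!e->valid) {
		/* First use: create the entry with all ports masked out */
		memset(e, 0, sizeof(*e));
		e->port_en = 0xff;
		e->valid = true;
	}
	if (add)
		e->port_en &= ~(1u << port);	/* clearing the bit enables */
	else
		e->port_en |= 1u << port;
}

int main(void)
{
	port_set(0, 2, true);
	printf("entry 0: valid=%d port_en=0x%02x\n",
	       shadow[0].valid, shadow[0].port_en);	/* 0xfb */
	return 0;
}
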
1716 | /* Set port to promiscuous mode */ | |
1717 | static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add) | |
1718 | { | |
1719 | struct mvpp2_prs_entry pe; | |
1720 | ||
dbedd44e | 1721 | /* Promiscuous mode - Accept unknown packets */ |
3f518509 MW |
1722 | |
1723 | if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) { | |
1724 | /* Entry exist - update port only */ | |
1725 | pe.index = MVPP2_PE_MAC_PROMISCUOUS; | |
1726 | mvpp2_prs_hw_read(priv, &pe); | |
1727 | } else { | |
1728 | /* Entry doesn't exist - create new */ | |
1729 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
1730 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); | |
1731 | pe.index = MVPP2_PE_MAC_PROMISCUOUS; | |
1732 | ||
1733 | /* Continue - set next lookup */ | |
1734 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); | |
1735 | ||
1736 | /* Set result info bits */ | |
1737 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST, | |
1738 | MVPP2_PRS_RI_L2_CAST_MASK); | |
1739 | ||
1740 | /* Shift to ethertype */ | |
1741 | mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, | |
1742 | MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
1743 | ||
1744 | /* Mask all ports */ | |
1745 | mvpp2_prs_tcam_port_map_set(&pe, 0); | |
1746 | ||
1747 | /* Update shadow table */ | |
1748 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); | |
1749 | } | |
1750 | ||
1751 | /* Update port mask */ | |
1752 | mvpp2_prs_tcam_port_set(&pe, port, add); | |
1753 | ||
1754 | mvpp2_prs_hw_write(priv, &pe); | |
1755 | } | |
1756 | ||
1757 | /* Accept multicast */ | |
1758 | static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index, | |
1759 | bool add) | |
1760 | { | |
1761 | struct mvpp2_prs_entry pe; | |
1762 | unsigned char da_mc; | |
1763 | ||
1764 | /* Ethernet multicast address first byte is | |
1765 | * 0x01 for IPv4 and 0x33 for IPv6 | |
1766 | */ | |
1767 | da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33; | |
1768 | ||
1769 | if (priv->prs_shadow[index].valid) { | |
1770 | /* Entry exist - update port only */ | |
1771 | pe.index = index; | |
1772 | mvpp2_prs_hw_read(priv, &pe); | |
1773 | } else { | |
1774 | /* Entry doesn't exist - create new */ | |
1775 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
1776 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); | |
1777 | pe.index = index; | |
1778 | ||
1779 | /* Continue - set next lookup */ | |
1780 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); | |
1781 | ||
1782 | /* Set result info bits */ | |
1783 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST, | |
1784 | MVPP2_PRS_RI_L2_CAST_MASK); | |
1785 | ||
1786 | /* Update tcam entry data first byte */ | |
1787 | mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff); | |
1788 | ||
1789 | /* Shift to ethertype */ | |
1790 | mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, | |
1791 | MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
1792 | ||
1793 | /* Mask all ports */ | |
1794 | mvpp2_prs_tcam_port_map_set(&pe, 0); | |
1795 | ||
1796 | /* Update shadow table */ | |
1797 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); | |
1798 | } | |
1799 | ||
1800 | /* Update port mask */ | |
1801 | mvpp2_prs_tcam_port_set(&pe, port, add); | |
1802 | ||
1803 | mvpp2_prs_hw_write(priv, &pe); | |
1804 | } | |
1805 | ||
1806 | /* Set entry for dsa packets */ | |
1807 | static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, | |
1808 | bool tagged, bool extend) | |
1809 | { | |
1810 | struct mvpp2_prs_entry pe; | |
1811 | int tid, shift; | |
1812 | ||
1813 | if (extend) { | |
1814 | tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED; | |
1815 | shift = 8; | |
1816 | } else { | |
1817 | tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED; | |
1818 | shift = 4; | |
1819 | } | |
1820 | ||
1821 | if (priv->prs_shadow[tid].valid) { | |
1822 | /* Entry exist - update port only */ | |
1823 | pe.index = tid; | |
1824 | mvpp2_prs_hw_read(priv, &pe); | |
1825 | } else { | |
1826 | /* Entry doesn't exist - create new */ | |
1827 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
1828 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); | |
1829 | pe.index = tid; | |
1830 | ||
1831 | /* Shift 4 bytes for a DSA tag or 8 bytes for an EDSA tag */ | |
1832 | mvpp2_prs_sram_shift_set(&pe, shift, | |
1833 | MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
1834 | ||
1835 | /* Update shadow table */ | |
1836 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); | |
1837 | ||
1838 | if (tagged) { | |
1839 | /* Set tagged bit in DSA tag */ | |
1840 | mvpp2_prs_tcam_data_byte_set(&pe, 0, | |
1841 | MVPP2_PRS_TCAM_DSA_TAGGED_BIT, | |
1842 | MVPP2_PRS_TCAM_DSA_TAGGED_BIT); | |
1843 | /* Clear all ai bits for next iteration */ | |
1844 | mvpp2_prs_sram_ai_update(&pe, 0, | |
1845 | MVPP2_PRS_SRAM_AI_MASK); | |
1846 | /* If packet is tagged continue check vlans */ | |
1847 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); | |
1848 | } else { | |
1849 | /* Set result info bits to 'no vlans' */ | |
1850 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, | |
1851 | MVPP2_PRS_RI_VLAN_MASK); | |
1852 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); | |
1853 | } | |
1854 | ||
1855 | /* Mask all ports */ | |
1856 | mvpp2_prs_tcam_port_map_set(&pe, 0); | |
1857 | } | |
1858 | ||
1859 | /* Update port mask */ | |
1860 | mvpp2_prs_tcam_port_set(&pe, port, add); | |
1861 | ||
1862 | mvpp2_prs_hw_write(priv, &pe); | |
1863 | } | |
1864 | ||
1865 | /* Set entry for dsa ethertype */ | |
1866 | static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, | |
1867 | bool add, bool tagged, bool extend) | |
1868 | { | |
1869 | struct mvpp2_prs_entry pe; | |
1870 | int tid, shift, port_mask; | |
1871 | ||
1872 | if (extend) { | |
1873 | tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED : | |
1874 | MVPP2_PE_ETYPE_EDSA_UNTAGGED; | |
1875 | port_mask = 0; | |
1876 | shift = 8; | |
1877 | } else { | |
1878 | tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED : | |
1879 | MVPP2_PE_ETYPE_DSA_UNTAGGED; | |
1880 | port_mask = MVPP2_PRS_PORT_MASK; | |
1881 | shift = 4; | |
1882 | } | |
1883 | ||
1884 | if (priv->prs_shadow[tid].valid) { | |
1885 | /* Entry exist - update port only */ | |
1886 | pe.index = tid; | |
1887 | mvpp2_prs_hw_read(priv, &pe); | |
1888 | } else { | |
1889 | /* Entry doesn't exist - create new */ | |
1890 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
1891 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); | |
1892 | pe.index = tid; | |
1893 | ||
1894 | /* Set ethertype */ | |
1895 | mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA); | |
1896 | mvpp2_prs_match_etype(&pe, 2, 0); | |
1897 | ||
1898 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK, | |
1899 | MVPP2_PRS_RI_DSA_MASK); | |
1900 | /* Shift ethertype + 2 reserved bytes + tag */ | |
1901 | mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift, | |
1902 | MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
1903 | ||
1904 | /* Update shadow table */ | |
1905 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); | |
1906 | ||
1907 | if (tagged) { | |
1908 | /* Set tagged bit in DSA tag */ | |
1909 | mvpp2_prs_tcam_data_byte_set(&pe, | |
1910 | MVPP2_ETH_TYPE_LEN + 2 + 3, | |
1911 | MVPP2_PRS_TCAM_DSA_TAGGED_BIT, | |
1912 | MVPP2_PRS_TCAM_DSA_TAGGED_BIT); | |
1913 | /* Clear all ai bits for next iteration */ | |
1914 | mvpp2_prs_sram_ai_update(&pe, 0, | |
1915 | MVPP2_PRS_SRAM_AI_MASK); | |
1916 | /* If packet is tagged continue check vlans */ | |
1917 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); | |
1918 | } else { | |
1919 | /* Set result info bits to 'no vlans' */ | |
1920 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, | |
1921 | MVPP2_PRS_RI_VLAN_MASK); | |
1922 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); | |
1923 | } | |
1924 | /* Mask/unmask all ports, depending on dsa type */ | |
1925 | mvpp2_prs_tcam_port_map_set(&pe, port_mask); | |
1926 | } | |
1927 | ||
1928 | /* Update port mask */ | |
1929 | mvpp2_prs_tcam_port_set(&pe, port, add); | |
1930 | ||
1931 | mvpp2_prs_hw_write(priv, &pe); | |
1932 | } | |
1933 | ||
1934 | /* Search for existing single/triple vlan entry */ | |
1935 | static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv, | |
1936 | unsigned short tpid, int ai) | |
1937 | { | |
1938 | struct mvpp2_prs_entry *pe; | |
1939 | int tid; | |
1940 | ||
1941 | pe = kzalloc(sizeof(*pe), GFP_KERNEL); | |
1942 | if (!pe) | |
1943 | return NULL; | |
1944 | mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); | |
1945 | ||
1946 | /* Go through all entries with MVPP2_PRS_LU_VLAN */ | |
1947 | for (tid = MVPP2_PE_FIRST_FREE_TID; | |
1948 | tid <= MVPP2_PE_LAST_FREE_TID; tid++) { | |
1949 | unsigned int ri_bits, ai_bits; | |
1950 | bool match; | |
1951 | ||
1952 | if (!priv->prs_shadow[tid].valid || | |
1953 | priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) | |
1954 | continue; | |
1955 | ||
1956 | pe->index = tid; | |
1957 | ||
1958 | mvpp2_prs_hw_read(priv, pe); | |
1959 | match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid)); | |
1960 | if (!match) | |
1961 | continue; | |
1962 | ||
1963 | /* Get vlan type */ | |
1964 | ri_bits = mvpp2_prs_sram_ri_get(pe); | |
1965 | ri_bits &= MVPP2_PRS_RI_VLAN_MASK; | |
1966 | ||
1967 | /* Get current ai value from tcam */ | |
1968 | ai_bits = mvpp2_prs_tcam_ai_get(pe); | |
1969 | /* Clear double vlan bit */ | |
1970 | ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; | |
1971 | ||
1972 | if (ai != ai_bits) | |
1973 | continue; | |
1974 | ||
1975 | if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || | |
1976 | ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) | |
1977 | return pe; | |
1978 | } | |
1979 | kfree(pe); | |
1980 | ||
1981 | return NULL; | |
1982 | } | |
1983 | ||
1984 | /* Add/update single/triple vlan entry */ | |
1985 | static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, | |
1986 | unsigned int port_map) | |
1987 | { | |
1988 | struct mvpp2_prs_entry *pe; | |
1989 | int tid_aux, tid; | |
43737473 | 1990 | int ret = 0; |
3f518509 MW |
1991 | |
1992 | pe = mvpp2_prs_vlan_find(priv, tpid, ai); | |
1993 | ||
1994 | if (!pe) { | |
1995 | /* Create new tcam entry */ | |
1996 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, | |
1997 | MVPP2_PE_FIRST_FREE_TID); | |
1998 | if (tid < 0) | |
1999 | return tid; | |
2000 | ||
2001 | pe = kzalloc(sizeof(*pe), GFP_KERNEL); | |
2002 | if (!pe) | |
2003 | return -ENOMEM; | |
2004 | ||
2005 | /* Get last double vlan tid */ | |
2006 | for (tid_aux = MVPP2_PE_LAST_FREE_TID; | |
2007 | tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) { | |
2008 | unsigned int ri_bits; | |
2009 | ||
2010 | if (!priv->prs_shadow[tid_aux].valid || | |
2011 | priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) | |
2012 | continue; | |
2013 | ||
2014 | pe->index = tid_aux; | |
2015 | mvpp2_prs_hw_read(priv, pe); | |
2016 | ri_bits = mvpp2_prs_sram_ri_get(pe); | |
2017 | if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == | |
2018 | MVPP2_PRS_RI_VLAN_DOUBLE) | |
2019 | break; | |
2020 | } | |
2021 | ||
43737473 SM |
2022 | if (tid <= tid_aux) { |
2023 | ret = -EINVAL; | |
2024 | goto error; | |
2025 | } | |
3f518509 MW |
2026 | |
2027 | memset(pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2028 | mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); | |
2029 | pe->index = tid; | |
2030 | ||
2031 | mvpp2_prs_match_etype(pe, 0, tpid); | |
2032 | ||
2033 | mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2); | |
2034 | /* Shift 4 bytes - skip 1 vlan tag */ | |
2035 | mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN, | |
2036 | MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
2037 | /* Clear all ai bits for next iteration */ | |
2038 | mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK); | |
2039 | ||
2040 | if (ai == MVPP2_PRS_SINGLE_VLAN_AI) { | |
2041 | mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE, | |
2042 | MVPP2_PRS_RI_VLAN_MASK); | |
2043 | } else { | |
2044 | ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; | |
2045 | mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE, | |
2046 | MVPP2_PRS_RI_VLAN_MASK); | |
2047 | } | |
2048 | mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK); | |
2049 | ||
2050 | mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN); | |
2051 | } | |
2052 | /* Update ports' mask */ | |
2053 | mvpp2_prs_tcam_port_map_set(pe, port_map); | |
2054 | ||
2055 | mvpp2_prs_hw_write(priv, pe); | |
2056 | ||
43737473 | 2057 | error: |
3f518509 MW |
2058 | kfree(pe); |
2059 | ||
43737473 | 2060 | return ret; |
3f518509 MW |
2061 | } |
2062 | ||
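The tid <= tid_aux guard keeps every single/triple-vlan entry at a higher TCAM index than the last double-vlan entry. Assuming the usual lowest-index-wins TCAM priority (which the mirrored tid >= tid_aux check in mvpp2_prs_double_vlan_add() also suggests), this makes a QinQ frame hit the more specific double-tag rule before the single-tag one. A toy first-hit model:

#include <stdio.h>

int main(void)
{
	const char *entries[] = { "double vlan 8100/88a8", "single vlan 8100" };
	int hit[] = { 1, 1 };	/* a QinQ frame matches both patterns */
	int i;

	for (i = 0; i < 2; i++)		/* lowest index wins */
		if (hit[i]) {
			printf("matched: %s\n", entries[i]);
			break;
		}
	return 0;
}
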
2063 | /* Get first free double vlan ai number */ | |
2064 | static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv) | |
2065 | { | |
2066 | int i; | |
2067 | ||
2068 | for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) { | |
2069 | if (!priv->prs_double_vlans[i]) | |
2070 | return i; | |
2071 | } | |
2072 | ||
2073 | return -EINVAL; | |
2074 | } | |
2075 | ||
2076 | /* Search for existing double vlan entry */ | |
2077 | static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv, | |
2078 | unsigned short tpid1, | |
2079 | unsigned short tpid2) | |
2080 | { | |
2081 | struct mvpp2_prs_entry *pe; | |
2082 | int tid; | |
2083 | ||
2084 | pe = kzalloc(sizeof(*pe), GFP_KERNEL); | |
2085 | if (!pe) | |
2086 | return NULL; | |
2087 | mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); | |
2088 | ||
2089 | /* Go through the all entries with MVPP2_PRS_LU_VLAN */ | |
2090 | for (tid = MVPP2_PE_FIRST_FREE_TID; | |
2091 | tid <= MVPP2_PE_LAST_FREE_TID; tid++) { | |
2092 | unsigned int ri_mask; | |
2093 | bool match; | |
2094 | ||
2095 | if (!priv->prs_shadow[tid].valid || | |
2096 | priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) | |
2097 | continue; | |
2098 | ||
2099 | pe->index = tid; | |
2100 | mvpp2_prs_hw_read(priv, pe); | |
2101 | ||
2102 | match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) | |
2103 | && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2)); | |
2104 | ||
2105 | if (!match) | |
2106 | continue; | |
2107 | ||
2108 | ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK; | |
2109 | if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE) | |
2110 | return pe; | |
2111 | } | |
2112 | kfree(pe); | |
2113 | ||
2114 | return NULL; | |
2115 | } | |
2116 | ||
2117 | /* Add or update double vlan entry */ | |
2118 | static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, | |
2119 | unsigned short tpid2, | |
2120 | unsigned int port_map) | |
2121 | { | |
2122 | struct mvpp2_prs_entry *pe; | |
43737473 | 2123 | int tid_aux, tid, ai, ret = 0; |
3f518509 MW |
2124 | |
2125 | pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); | |
2126 | ||
2127 | if (!pe) { | |
2128 | /* Create new tcam entry */ | |
2129 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2130 | MVPP2_PE_LAST_FREE_TID); | |
2131 | if (tid < 0) | |
2132 | return tid; | |
2133 | ||
2134 | pe = kzalloc(sizeof(*pe), GFP_KERNEL); | |
2135 | if (!pe) | |
2136 | return -ENOMEM; | |
2137 | ||
2138 | /* Set ai value for new double vlan entry */ | |
2139 | ai = mvpp2_prs_double_vlan_ai_free_get(priv); | |
43737473 SM |
2140 | if (ai < 0) { |
2141 | ret = ai; | |
2142 | goto error; | |
2143 | } | |
3f518509 MW |
2144 | |
2145 | /* Get first single/triple vlan tid */ | |
2146 | for (tid_aux = MVPP2_PE_FIRST_FREE_TID; | |
2147 | tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) { | |
2148 | unsigned int ri_bits; | |
2149 | ||
2150 | if (!priv->prs_shadow[tid_aux].valid || | |
2151 | priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) | |
2152 | continue; | |
2153 | ||
2154 | pe->index = tid_aux; | |
2155 | mvpp2_prs_hw_read(priv, pe); | |
2156 | ri_bits = mvpp2_prs_sram_ri_get(pe); | |
2157 | ri_bits &= MVPP2_PRS_RI_VLAN_MASK; | |
2158 | if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || | |
2159 | ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) | |
2160 | break; | |
2161 | } | |
2162 | ||
43737473 SM |
2163 | if (tid >= tid_aux) { |
2164 | ret = -ERANGE; | |
2165 | goto error; | |
2166 | } | |
3f518509 MW |
2167 | |
2168 | memset(pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2169 | mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); | |
2170 | pe->index = tid; | |
2171 | ||
2172 | priv->prs_double_vlans[ai] = true; | |
2173 | ||
2174 | mvpp2_prs_match_etype(pe, 0, tpid1); | |
2175 | mvpp2_prs_match_etype(pe, 4, tpid2); | |
2176 | ||
2177 | mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN); | |
2178 | /* Shift 8 bytes - skip 2 vlan tags */ | |
2179 | mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN, | |
2180 | MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
2181 | mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE, | |
2182 | MVPP2_PRS_RI_VLAN_MASK); | |
2183 | mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, | |
2184 | MVPP2_PRS_SRAM_AI_MASK); | |
2185 | ||
2186 | mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN); | |
2187 | } | |
2188 | ||
2189 | /* Update ports' mask */ | |
2190 | mvpp2_prs_tcam_port_map_set(pe, port_map); | |
2191 | mvpp2_prs_hw_write(priv, pe); | |
2192 | ||
43737473 | 2193 | error: |
3f518509 | 2194 | kfree(pe); |
43737473 | 2195 | return ret; |
3f518509 MW |
2196 | } |
2197 | ||
2198 | /* IPv4 header parsing for fragmentation and L4 offset */ | |
2199 | static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, | |
2200 | unsigned int ri, unsigned int ri_mask) | |
2201 | { | |
2202 | struct mvpp2_prs_entry pe; | |
2203 | int tid; | |
2204 | ||
2205 | if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && | |
2206 | (proto != IPPROTO_IGMP)) | |
2207 | return -EINVAL; | |
2208 | ||
2209 | /* Fragmented packet */ | |
2210 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2211 | MVPP2_PE_LAST_FREE_TID); | |
2212 | if (tid < 0) | |
2213 | return tid; | |
2214 | ||
2215 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2216 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); | |
2217 | pe.index = tid; | |
2218 | ||
2219 | /* Set next lu to IPv4 */ | |
2220 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); | |
2221 | mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
2222 | /* Set L4 offset */ | |
2223 | mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, | |
2224 | sizeof(struct iphdr) - 4, | |
2225 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); | |
2226 | mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, | |
2227 | MVPP2_PRS_IPV4_DIP_AI_BIT); | |
2228 | mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK, | |
2229 | ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); | |
2230 | ||
2231 | mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); | |
2232 | mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); | |
2233 | /* Unmask all ports */ | |
2234 | mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); | |
2235 | ||
2236 | /* Update shadow table and hw entry */ | |
2237 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); | |
2238 | mvpp2_prs_hw_write(priv, &pe); | |
2239 | ||
2240 | /* Not fragmented packet */ | |
2241 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2242 | MVPP2_PE_LAST_FREE_TID); | |
2243 | if (tid < 0) | |
2244 | return tid; | |
2245 | ||
2246 | pe.index = tid; | |
2247 | /* Clear ri before updating */ | |
2248 | pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; | |
2249 | pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; | |
2250 | mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); | |
2251 | ||
2252 | mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L); | |
2253 | mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK); | |
2254 | ||
2255 | /* Update shadow table and hw entry */ | |
2256 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); | |
2257 | mvpp2_prs_hw_write(priv, &pe); | |
2258 | ||
2259 | return 0; | |
2260 | } | |
2261 | ||
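The helper installs two entries per protocol: one that marks the frame fragmented, and a second whose TCAM additionally requires the flags/fragment-offset word to be zero, rewriting ri for the non-fragmented case. After the "eth_type + 4 bytes" shift seen in mvpp2_prs_etype_init(), the data window starts at the IP ID field, so byte 5 holds the protocol and bytes 2-3 the fragment word. A sketch of that zero test, assuming MVPP2_PRS_TCAM_PROTO_MASK_L masks off the DF and reserved flag bits:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Data window after the "eth_type + 4 bytes" shift (iphdr layout):
	 * bytes 0-1 = ID, bytes 2-3 = flags/fragment offset, byte 5 =
	 * protocol.  0x3f stands in for MVPP2_PRS_TCAM_PROTO_MASK_L and is
	 * assumed to drop the DF and reserved flag bits, leaving MF and the
	 * offset to decide whether the frame is a fragment.
	 */
	uint8_t win[6] = { 0x12, 0x34, 0x40, 0x00, 0x40, 0x06 };
	int not_fragmented;

	not_fragmented = ((win[2] & 0x3f) == 0) && (win[3] == 0);
	printf("proto=%u not_fragmented=%d\n", win[5], not_fragmented);
	return 0;
}
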
2262 | /* IPv4 L3 multicast or broadcast */ | |
2263 | static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast) | |
2264 | { | |
2265 | struct mvpp2_prs_entry pe; | |
2266 | int mask, tid; | |
2267 | ||
2268 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2269 | MVPP2_PE_LAST_FREE_TID); | |
2270 | if (tid < 0) | |
2271 | return tid; | |
2272 | ||
2273 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2274 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); | |
2275 | pe.index = tid; | |
2276 | ||
2277 | switch (l3_cast) { | |
2278 | case MVPP2_PRS_L3_MULTI_CAST: | |
2279 | mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC, | |
2280 | MVPP2_PRS_IPV4_MC_MASK); | |
2281 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, | |
2282 | MVPP2_PRS_RI_L3_ADDR_MASK); | |
2283 | break; | |
2284 | case MVPP2_PRS_L3_BROAD_CAST: | |
2285 | mask = MVPP2_PRS_IPV4_BC_MASK; | |
2286 | mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask); | |
2287 | mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask); | |
2288 | mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask); | |
2289 | mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask); | |
2290 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST, | |
2291 | MVPP2_PRS_RI_L3_ADDR_MASK); | |
2292 | break; | |
2293 | default: | |
2294 | return -EINVAL; | |
2295 | } | |
2296 | ||
2297 | /* Finished: go to flowid generation */ | |
2298 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); | |
2299 | mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); | |
2300 | ||
2301 | mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, | |
2302 | MVPP2_PRS_IPV4_DIP_AI_BIT); | |
2303 | /* Unmask all ports */ | |
2304 | mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); | |
2305 | ||
2306 | /* Update shadow table and hw entry */ | |
2307 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); | |
2308 | mvpp2_prs_hw_write(priv, &pe); | |
2309 | ||
2310 | return 0; | |
2311 | } | |
2312 | ||
2313 | /* Set entries for protocols over IPv6 */ | |
2314 | static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto, | |
2315 | unsigned int ri, unsigned int ri_mask) | |
2316 | { | |
2317 | struct mvpp2_prs_entry pe; | |
2318 | int tid; | |
2319 | ||
2320 | if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && | |
2321 | (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP)) | |
2322 | return -EINVAL; | |
2323 | ||
2324 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2325 | MVPP2_PE_LAST_FREE_TID); | |
2326 | if (tid < 0) | |
2327 | return tid; | |
2328 | ||
2329 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2330 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); | |
2331 | pe.index = tid; | |
2332 | ||
2333 | /* Finished: go to flowid generation */ | |
2334 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); | |
2335 | mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); | |
2336 | mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); | |
2337 | mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, | |
2338 | sizeof(struct ipv6hdr) - 6, | |
2339 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); | |
2340 | ||
2341 | mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK); | |
2342 | mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, | |
2343 | MVPP2_PRS_IPV6_NO_EXT_AI_BIT); | |
2344 | /* Unmask all ports */ | |
2345 | mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); | |
2346 | ||
2347 | /* Write HW */ | |
2348 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); | |
2349 | mvpp2_prs_hw_write(priv, &pe); | |
2350 | ||
2351 | return 0; | |
2352 | } | |
2353 | ||
2354 | /* IPv6 L3 multicast entry */ | |
2355 | static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast) | |
2356 | { | |
2357 | struct mvpp2_prs_entry pe; | |
2358 | int tid; | |
2359 | ||
2360 | if (l3_cast != MVPP2_PRS_L3_MULTI_CAST) | |
2361 | return -EINVAL; | |
2362 | ||
2363 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2364 | MVPP2_PE_LAST_FREE_TID); | |
2365 | if (tid < 0) | |
2366 | return tid; | |
2367 | ||
2368 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2369 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); | |
2370 | pe.index = tid; | |
2371 | ||
2372 | /* Continue - set next lookup to IPv6 again */ | |
2373 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); | |
2374 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, | |
2375 | MVPP2_PRS_RI_L3_ADDR_MASK); | |
2376 | mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, | |
2377 | MVPP2_PRS_IPV6_NO_EXT_AI_BIT); | |
2378 | /* Shift back to IPv6 NH */ | |
2379 | mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
2380 | ||
2381 | mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC, | |
2382 | MVPP2_PRS_IPV6_MC_MASK); | |
2383 | mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); | |
2384 | /* Unmask all ports */ | |
2385 | mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); | |
2386 | ||
2387 | /* Update shadow table and hw entry */ | |
2388 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); | |
2389 | mvpp2_prs_hw_write(priv, &pe); | |
2390 | ||
2391 | return 0; | |
2392 | } | |
2393 | ||
2394 | /* Parser per-port initialization */ | |
2395 | static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, | |
2396 | int lu_max, int offset) | |
2397 | { | |
2398 | u32 val; | |
2399 | ||
2400 | /* Set lookup ID */ | |
2401 | val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); | |
2402 | val &= ~MVPP2_PRS_PORT_LU_MASK(port); | |
2403 | val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); | |
2404 | mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); | |
2405 | ||
2406 | /* Set maximum number of loops for packet received from port */ | |
2407 | val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); | |
2408 | val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); | |
2409 | val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); | |
2410 | mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); | |
2411 | ||
2412 | /* Set initial offset for packet header extraction for the first | |
2413 | * searching loop | |
2414 | */ | |
2415 | val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); | |
2416 | val &= ~MVPP2_PRS_INIT_OFF_MASK(port); | |
2417 | val |= MVPP2_PRS_INIT_OFF_VAL(port, offset); | |
2418 | mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); | |
2419 | } | |
2420 | ||
2421 | /* Default flow entries initialization for all ports */ | |
2422 | static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) | |
2423 | { | |
2424 | struct mvpp2_prs_entry pe; | |
2425 | int port; | |
2426 | ||
2427 | for (port = 0; port < MVPP2_MAX_PORTS; port++) { | |
2428 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2429 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); | |
2430 | pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; | |
2431 | ||
2432 | /* Mask all ports */ | |
2433 | mvpp2_prs_tcam_port_map_set(&pe, 0); | |
2434 | ||
2435 | /* Set flow ID */ | |
2436 | mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); | |
2437 | mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); | |
2438 | ||
2439 | /* Update shadow table and hw entry */ | |
2440 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); | |
2441 | mvpp2_prs_hw_write(priv, &pe); | |
2442 | } | |
2443 | } | |
2444 | ||
2445 | /* Set default entry for Marvell Header field */ | |
2446 | static void mvpp2_prs_mh_init(struct mvpp2 *priv) | |
2447 | { | |
2448 | struct mvpp2_prs_entry pe; | |
2449 | ||
2450 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2451 | ||
2452 | pe.index = MVPP2_PE_MH_DEFAULT; | |
2453 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); | |
2454 | mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, | |
2455 | MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
2456 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); | |
2457 | ||
2458 | /* Unmask all ports */ | |
2459 | mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); | |
2460 | ||
2461 | /* Update shadow table and hw entry */ | |
2462 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); | |
2463 | mvpp2_prs_hw_write(priv, &pe); | |
2464 | } | |
2465 | ||
2466 | /* Set default entries (placeholders) for promiscuous, non-promiscuous and | |
2467 | * multicast MAC addresses | |
2468 | */ | |
2469 | static void mvpp2_prs_mac_init(struct mvpp2 *priv) | |
2470 | { | |
2471 | struct mvpp2_prs_entry pe; | |
2472 | ||
2473 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2474 | ||
2475 | /* Non-promiscuous mode for all ports - DROP unknown packets */ | |
2476 | pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; | |
2477 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); | |
2478 | ||
2479 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, | |
2480 | MVPP2_PRS_RI_DROP_MASK); | |
2481 | mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); | |
2482 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); | |
2483 | ||
2484 | /* Unmask all ports */ | |
2485 | mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); | |
2486 | ||
2487 | /* Update shadow table and hw entry */ | |
2488 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); | |
2489 | mvpp2_prs_hw_write(priv, &pe); | |
2490 | ||
2491 | /* placeholders only - no ports */ | |
2492 | mvpp2_prs_mac_drop_all_set(priv, 0, false); | |
2493 | mvpp2_prs_mac_promisc_set(priv, 0, false); | |
2494 | mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false); | |
2495 | mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false); | |
2496 | } | |
2497 | ||
2498 | /* Set default entries for various types of dsa packets */ | |
2499 | static void mvpp2_prs_dsa_init(struct mvpp2 *priv) | |
2500 | { | |
2501 | struct mvpp2_prs_entry pe; | |
2502 | ||
2503 | /* Untagged EDSA entry - placeholder */ | |
2504 | mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, | |
2505 | MVPP2_PRS_EDSA); | |
2506 | ||
2507 | /* Tagged EDSA entry - placeholder */ | |
2508 | mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); | |
2509 | ||
2510 | /* Untagged DSA entry - placeholder */ | |
2511 | mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, | |
2512 | MVPP2_PRS_DSA); | |
2513 | ||
2514 | /* Tagged DSA entry - placeholder */ | |
2515 | mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); | |
2516 | ||
2517 | /* Untagged EDSA ethertype entry - placeholder */ | |
2518 | mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, | |
2519 | MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); | |
2520 | ||
2521 | /* Tagged EDSA ethertype entry - placeholder */ | |
2522 | mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, | |
2523 | MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); | |
2524 | ||
2525 | /* Untagged DSA ethertype entry */ | |
2526 | mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, | |
2527 | MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); | |
2528 | ||
2529 | /* Tagged DSA ethertype entry */ | |
2530 | mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, | |
2531 | MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); | |
2532 | ||
2533 | /* Set default entry, in case DSA or EDSA tag not found */ | |
2534 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2535 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); | |
2536 | pe.index = MVPP2_PE_DSA_DEFAULT; | |
2537 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); | |
2538 | ||
2539 | /* Shift 0 bytes */ | |
2540 | mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
2541 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); | |
2542 | ||
2543 | /* Clear all sram ai bits for next iteration */ | |
2544 | mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); | |
2545 | ||
2546 | /* Unmask all ports */ | |
2547 | mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); | |
2548 | ||
2549 | mvpp2_prs_hw_write(priv, &pe); | |
2550 | } | |
2551 | ||
2552 | /* Match basic ethertypes */ | |
2553 | static int mvpp2_prs_etype_init(struct mvpp2 *priv) | |
2554 | { | |
2555 | struct mvpp2_prs_entry pe; | |
2556 | int tid; | |
2557 | ||
2558 | /* Ethertype: PPPoE */ | |
2559 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2560 | MVPP2_PE_LAST_FREE_TID); | |
2561 | if (tid < 0) | |
2562 | return tid; | |
2563 | ||
2564 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2565 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); | |
2566 | pe.index = tid; | |
2567 | ||
2568 | mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES); | |
2569 | ||
2570 | mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, | |
2571 | MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
2572 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); | |
2573 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, | |
2574 | MVPP2_PRS_RI_PPPOE_MASK); | |
2575 | ||
2576 | /* Update shadow table and hw entry */ | |
2577 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); | |
2578 | priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; | |
2579 | priv->prs_shadow[pe.index].finish = false; | |
2580 | mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, | |
2581 | MVPP2_PRS_RI_PPPOE_MASK); | |
2582 | mvpp2_prs_hw_write(priv, &pe); | |
2583 | ||
2584 | /* Ethertype: ARP */ | |
2585 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2586 | MVPP2_PE_LAST_FREE_TID); | |
2587 | if (tid < 0) | |
2588 | return tid; | |
2589 | ||
2590 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2591 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); | |
2592 | pe.index = tid; | |
2593 | ||
2594 | mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP); | |
2595 | ||
2596 | /* Generate flow in the next iteration */ | |
2597 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); | |
2598 | mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); | |
2599 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, | |
2600 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2601 | /* Set L3 offset */ | |
2602 | mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, | |
2603 | MVPP2_ETH_TYPE_LEN, | |
2604 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); | |
2605 | ||
2606 | /* Update shadow table and hw entry */ | |
2607 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); | |
2608 | priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; | |
2609 | priv->prs_shadow[pe.index].finish = true; | |
2610 | mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, | |
2611 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2612 | mvpp2_prs_hw_write(priv, &pe); | |
2613 | ||
2614 | /* Ethertype: LBTD */ | |
2615 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2616 | MVPP2_PE_LAST_FREE_TID); | |
2617 | if (tid < 0) | |
2618 | return tid; | |
2619 | ||
2620 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2621 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); | |
2622 | pe.index = tid; | |
2623 | ||
2624 | mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); | |
2625 | ||
2626 | /* Generate flow in the next iteration */ | |
2627 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); | |
2628 | mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); | |
2629 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | | |
2630 | MVPP2_PRS_RI_UDF3_RX_SPECIAL, | |
2631 | MVPP2_PRS_RI_CPU_CODE_MASK | | |
2632 | MVPP2_PRS_RI_UDF3_MASK); | |
2633 | /* Set L3 offset */ | |
2634 | mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, | |
2635 | MVPP2_ETH_TYPE_LEN, | |
2636 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); | |
2637 | ||
2638 | /* Update shadow table and hw entry */ | |
2639 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); | |
2640 | priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; | |
2641 | priv->prs_shadow[pe.index].finish = true; | |
2642 | mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | | |
2643 | MVPP2_PRS_RI_UDF3_RX_SPECIAL, | |
2644 | MVPP2_PRS_RI_CPU_CODE_MASK | | |
2645 | MVPP2_PRS_RI_UDF3_MASK); | |
2646 | mvpp2_prs_hw_write(priv, &pe); | |
2647 | ||
2648 | /* Ethertype: IPv4 without options */ | |
2649 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2650 | MVPP2_PE_LAST_FREE_TID); | |
2651 | if (tid < 0) | |
2652 | return tid; | |
2653 | ||
2654 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2655 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); | |
2656 | pe.index = tid; | |
2657 | ||
2658 | mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); | |
2659 | mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, | |
2660 | MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, | |
2661 | MVPP2_PRS_IPV4_HEAD_MASK | | |
2662 | MVPP2_PRS_IPV4_IHL_MASK); | |
2663 | ||
2664 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); | |
2665 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, | |
2666 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2667 | /* Skip eth_type + 4 bytes of IP header */ | |
2668 | mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, | |
2669 | MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
2670 | /* Set L3 offset */ | |
2671 | mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, | |
2672 | MVPP2_ETH_TYPE_LEN, | |
2673 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); | |
2674 | ||
2675 | /* Update shadow table and hw entry */ | |
2676 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); | |
2677 | priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; | |
2678 | priv->prs_shadow[pe.index].finish = false; | |
2679 | mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, | |
2680 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2681 | mvpp2_prs_hw_write(priv, &pe); | |
2682 | ||
2683 | /* Ethertype: IPv4 with options */ | |
2684 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2685 | MVPP2_PE_LAST_FREE_TID); | |
2686 | if (tid < 0) | |
2687 | return tid; | |
2688 | ||
2689 | pe.index = tid; | |
2690 | ||
2691 | /* Clear tcam data before updating */ | |
2692 | pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; | |
2693 | pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; | |
2694 | ||
2695 | mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, | |
2696 | MVPP2_PRS_IPV4_HEAD, | |
2697 | MVPP2_PRS_IPV4_HEAD_MASK); | |
2698 | ||
2699 | /* Clear ri before updating */ | |
2700 | pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; | |
2701 | pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; | |
2702 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, | |
2703 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2704 | ||
2705 | /* Update shadow table and hw entry */ | |
2706 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); | |
2707 | priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; | |
2708 | priv->prs_shadow[pe.index].finish = false; | |
2709 | mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, | |
2710 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2711 | mvpp2_prs_hw_write(priv, &pe); | |
2712 | ||
2713 | /* Ethertype: IPv6 without options */ | |
2714 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2715 | MVPP2_PE_LAST_FREE_TID); | |
2716 | if (tid < 0) | |
2717 | return tid; | |
2718 | ||
2719 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2720 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); | |
2721 | pe.index = tid; | |
2722 | ||
2723 | mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6); | |
2724 | ||
2725 | /* Skip DIP of IPV6 header */ | |
2726 | mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + | |
2727 | MVPP2_MAX_L3_ADDR_SIZE, | |
2728 | MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
2729 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); | |
2730 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, | |
2731 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2732 | /* Set L3 offset */ | |
2733 | mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, | |
2734 | MVPP2_ETH_TYPE_LEN, | |
2735 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); | |
2736 | ||
2737 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); | |
2738 | priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; | |
2739 | priv->prs_shadow[pe.index].finish = false; | |
2740 | mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, | |
2741 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2742 | mvpp2_prs_hw_write(priv, &pe); | |
2743 | ||
2744 | /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethertype */ | |
2745 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2746 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); | |
2747 | pe.index = MVPP2_PE_ETH_TYPE_UN; | |
2748 | ||
2749 | /* Unmask all ports */ | |
2750 | mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); | |
2751 | ||
2752 | /* Generate flow in the next iteration */ | |
2753 | mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); | |
2754 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); | |
2755 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, | |
2756 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2757 | /* Set L3 offset even if the L3 protocol is unknown */ | |
2758 | mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, | |
2759 | MVPP2_ETH_TYPE_LEN, | |
2760 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); | |
2761 | ||
2762 | /* Update shadow table and hw entry */ | |
2763 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); | |
2764 | priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; | |
2765 | priv->prs_shadow[pe.index].finish = true; | |
2766 | mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, | |
2767 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2768 | mvpp2_prs_hw_write(priv, &pe); | |
2769 | ||
2770 | return 0; | |
2771 | } | |
2772 | ||
2773 | /* Configure vlan entries and detect up to 2 successive VLAN tags. | |
2774 | * Possible options: | |
2775 | * 0x8100, 0x88A8 | |
2776 | * 0x8100, 0x8100 | |
2777 | * 0x8100 | |
2778 | * 0x88A8 | |
2779 | */ | |
2780 | static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) | |
2781 | { | |
2782 | struct mvpp2_prs_entry pe; | |
2783 | int err; | |
2784 | ||
2785 | priv->prs_double_vlans = devm_kcalloc(&pdev->dev, | |
2786 | MVPP2_PRS_DBL_VLANS_MAX, | |
2787 | sizeof(bool), GFP_KERNEL); | |
2788 | if (!priv->prs_double_vlans) | |
2789 | return -ENOMEM; | |
2790 | ||
2791 | /* Double VLAN: 0x8100, 0x88A8 */ | |
2792 | err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD, | |
2793 | MVPP2_PRS_PORT_MASK); | |
2794 | if (err) | |
2795 | return err; | |
2796 | ||
2797 | /* Double VLAN: 0x8100, 0x8100 */ | |
2798 | err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q, | |
2799 | MVPP2_PRS_PORT_MASK); | |
2800 | if (err) | |
2801 | return err; | |
2802 | ||
2803 | /* Single VLAN: 0x88a8 */ | |
2804 | err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, | |
2805 | MVPP2_PRS_PORT_MASK); | |
2806 | if (err) | |
2807 | return err; | |
2808 | ||
2809 | /* Single VLAN: 0x8100 */ | |
2810 | err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, | |
2811 | MVPP2_PRS_PORT_MASK); | |
2812 | if (err) | |
2813 | return err; | |
2814 | ||
2815 | /* Set default double vlan entry */ | |
2816 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2817 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); | |
2818 | pe.index = MVPP2_PE_VLAN_DBL; | |
2819 | ||
2820 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); | |
2821 | /* Clear ai for next iterations */ | |
2822 | mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); | |
2823 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, | |
2824 | MVPP2_PRS_RI_VLAN_MASK); | |
2825 | ||
2826 | mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT, | |
2827 | MVPP2_PRS_DBL_VLAN_AI_BIT); | |
2828 | /* Unmask all ports */ | |
2829 | mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); | |
2830 | ||
2831 | /* Update shadow table and hw entry */ | |
2832 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); | |
2833 | mvpp2_prs_hw_write(priv, &pe); | |
2834 | ||
2835 | /* Set default vlan none entry */ | |
2836 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2837 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); | |
2838 | pe.index = MVPP2_PE_VLAN_NONE; | |
2839 | ||
2840 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); | |
2841 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, | |
2842 | MVPP2_PRS_RI_VLAN_MASK); | |
2843 | ||
2844 | /* Unmask all ports */ | |
2845 | mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); | |
2846 | ||
2847 | /* Update shadow table and hw entry */ | |
2848 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); | |
2849 | mvpp2_prs_hw_write(priv, &pe); | |
2850 | ||
2851 | return 0; | |
2852 | } | |
2853 | ||
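Taken together, mvpp2_prs_vlan_init() wires a two-step walk for double-tagged frames: the specific double-vlan TCAM entry shifts past both tags, sets the DBL ai bit and loops back into the VLAN lookup, where the MVPP2_PE_VLAN_DBL default entry keyed on that ai bit records the "double vlan" result and hands off to L2. A sketch of the chain (state strings are informal):

#include <stdio.h>

int main(void)
{
	const char *chain[] = {
		"VLAN lu: tpid1/tpid2 entry -> shift 8, ai |= DBL, next = VLAN",
		"VLAN lu: MVPP2_PE_VLAN_DBL (ai DBL) -> RI double vlan, next = L2",
		"L2 lu: ethertype matching continues past both tags",
	};
	int i;

	for (i = 0; i < 3; i++)
		printf("%d. %s\n", i + 1, chain[i]);
	return 0;
}
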
2854 | /* Set entries for PPPoE ethertype */ | |
2855 | static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) | |
2856 | { | |
2857 | struct mvpp2_prs_entry pe; | |
2858 | int tid; | |
2859 | ||
2860 | /* IPv4 over PPPoE with options */ | |
2861 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2862 | MVPP2_PE_LAST_FREE_TID); | |
2863 | if (tid < 0) | |
2864 | return tid; | |
2865 | ||
2866 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2867 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); | |
2868 | pe.index = tid; | |
2869 | ||
2870 | mvpp2_prs_match_etype(&pe, 0, PPP_IP); | |
2871 | ||
2872 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); | |
2873 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, | |
2874 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2875 | /* Skip eth_type + 4 bytes of IP header */ | |
2876 | mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, | |
2877 | MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
2878 | /* Set L3 offset */ | |
2879 | mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, | |
2880 | MVPP2_ETH_TYPE_LEN, | |
2881 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); | |
2882 | ||
2883 | /* Update shadow table and hw entry */ | |
2884 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); | |
2885 | mvpp2_prs_hw_write(priv, &pe); | |
2886 | ||
2887 | /* IPv4 over PPPoE without options */ | |
2888 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2889 | MVPP2_PE_LAST_FREE_TID); | |
2890 | if (tid < 0) | |
2891 | return tid; | |
2892 | ||
2893 | pe.index = tid; | |
2894 | ||
2895 | mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, | |
2896 | MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, | |
2897 | MVPP2_PRS_IPV4_HEAD_MASK | | |
2898 | MVPP2_PRS_IPV4_IHL_MASK); | |
2899 | ||
2900 | /* Clear ri before updating */ | |
2901 | pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; | |
2902 | pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; | |
2903 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, | |
2904 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2905 | ||
2906 | /* Update shadow table and hw entry */ | |
2907 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); | |
2908 | mvpp2_prs_hw_write(priv, &pe); | |
2909 | ||
2910 | /* IPv6 over PPPoE */ | |
2911 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2912 | MVPP2_PE_LAST_FREE_TID); | |
2913 | if (tid < 0) | |
2914 | return tid; | |
2915 | ||
2916 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2917 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); | |
2918 | pe.index = tid; | |
2919 | ||
2920 | mvpp2_prs_match_etype(&pe, 0, PPP_IPV6); | |
2921 | ||
2922 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); | |
2923 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, | |
2924 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2925 | /* Skip eth_type + 4 bytes of IPv6 header */ | |
2926 | mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, | |
2927 | MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); | |
2928 | /* Set L3 offset */ | |
2929 | mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, | |
2930 | MVPP2_ETH_TYPE_LEN, | |
2931 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); | |
2932 | ||
2933 | /* Update shadow table and hw entry */ | |
2934 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); | |
2935 | mvpp2_prs_hw_write(priv, &pe); | |
2936 | ||
2937 | /* Non-IP over PPPoE */ | |
2938 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | |
2939 | MVPP2_PE_LAST_FREE_TID); | |
2940 | if (tid < 0) | |
2941 | return tid; | |
2942 | ||
2943 | memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); | |
2944 | mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); | |
2945 | pe.index = tid; | |
2946 | ||
2947 | mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, | |
2948 | MVPP2_PRS_RI_L3_PROTO_MASK); | |
2949 | ||
2950 | /* Finished: go to flowid generation */ | |
2951 | mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); | |
2952 | mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); | |
2953 | /* Set L3 offset even if it's unknown L3 */ | |
2954 | mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, | |
2955 | MVPP2_ETH_TYPE_LEN, | |
2956 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); | |
2957 | ||
2958 | /* Update shadow table and hw entry */ | |
2959 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); | |
2960 | mvpp2_prs_hw_write(priv, &pe); | |
2961 | ||
2962 | return 0; | |
2963 | } | |
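
/* For reference (values from <uapi/linux/ppp_defs.h>, included above): the
 * PPPoE entries match on the 16-bit PPP protocol field that follows the
 * PPPoE session header, so PPP_IP (0x0021) selects IPv4 and PPP_IPV6
 * (0x0057) selects IPv6; anything else falls through to the non-IP entry.
 */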

/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
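
/* Note on the two IP4 default entries above: the "unknown protocols" entry
 * matches with the DIP ai bit clear, loops the lookup back to
 * MVPP2_PRS_LU_IP4 and sets MVPP2_PRS_IPV4_DIP_AI_BIT, so that the second
 * pass (the "unicast address" entry, which requires that ai bit) can
 * classify the destination address and finish with flow-id generation.
 */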

/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header; this is handled like the TCP (6) and
	 * UDP (17) protocol entries above.
	 */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relative to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
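
/* Note: the hop-limit entry above appears to drop at parse time: it matches
 * a zero hop-limit byte (tcam data byte 1 under MVPP2_PRS_IPV6_HOP_MASK)
 * and sets MVPP2_PRS_RI_DROP_MASK in the result info, so such packets never
 * reach the flow-id stage with a usable L3 classification.
 */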

/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}

/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
						       MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (is_multicast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}
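
/* Illustrative sketch (assumed caller shape, mirroring how an rx-mode
 * handler would use the helper above): accepting every address in a
 * netdev's multicast list is one mvpp2_prs_mac_da_accept() call per entry.
 *
 *	struct netdev_hw_addr *ha;
 *
 *	netdev_for_each_mc_addr(ha, dev)
 *		if (mvpp2_prs_mac_da_accept(port->priv, port->id,
 *					    ha->addr, true))
 *			netdev_err(dev, "could not add mcast %pM\n", ha->addr);
 */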

static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}
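
/* Illustrative sketch (hypothetical wrapper, not taken from this file): a
 * .ndo_set_mac_address handler would just validate the new address and
 * delegate to mvpp2_prs_update_mac_da():
 *
 *	static int mvpp2_set_mac_address(struct net_device *dev, void *p)
 *	{
 *		const struct sockaddr *addr = p;
 *
 *		if (!is_valid_ether_addr(addr->sa_data))
 *			return -EADDRNOTAVAIL;
 *
 *		return mvpp2_prs_update_mac_da(dev, addr->sa_data);
 *	}
 */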

/* Delete all port's multicast simple (not range) entries */
static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
{
	struct mvpp2_prs_entry pe;
	int index, tid;

	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		/* Only simple mac entries */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
			/* Delete this entry */
			mvpp2_prs_mac_da_accept(priv, port, da, false);
	}
}

static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}

/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry exists yet */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}

/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
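
/* Example of the lookup index encoding above: for way 1 and lookup ID 3,
 * the value written to MVPP2_CLS_LKP_INDEX_REG is
 * (1 << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | 3 — the way bit sits above the
 * lkpid field, which is why mvpp2_cls_init() below programs each lkpid
 * once per way.
 */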

/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}

static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}

static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
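
/* The PAGE_SIZE cut-off above reflects that netdev_alloc_frag() serves
 * allocations from a per-CPU page-fragment cache and is only meant for
 * sizes up to a page; larger frag_size values fall back to plain
 * kmalloc()/kfree(). The two helpers must stay symmetrical so every buffer
 * is freed by the scheme that allocated it.
 */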

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	int cpu = smp_processor_id();

	*dma_addr = mvpp2_percpu_read(priv, cpu,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
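
/* Example of the cookie round trip above (values are illustrative):
 *
 *	u32 bm = mvpp2_bm_cookie_pool_set(0, 3);
 *	int pool = mvpp2_bm_cookie_pool_get(bm);	// pool == 3
 *
 * The pool id occupies an 8-bit field at MVPP2_BM_COOKIE_POOL_OFFS, so a
 * set followed by a get is lossless for pool numbers 0-255.
 */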

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	int cpu = smp_processor_id();

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_percpu_write(port->priv, cpu,
				   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_percpu_write(port->priv, cpu,
			   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_percpu_write(port->priv, cpu,
			   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}

/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      dma_addr_t dma_addr,
			      phys_addr_t phys_addr)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "%s pool %d: %d of %d buffers added\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, i, buf_num);
	return i;
}

/* Notify the driver that a BM pool is being used as a specific type and
 * return the pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
		       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					 MVPP2_BM_SWF_LONG,
					 port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
					  MVPP2_BM_SWF_SHORT,
					  MVPP2_BM_SHORT_PKT_SIZE);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_bm_pool *port_pool = port->pool_long;
	int num, pkts_num = port_pool->buf_num;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* Update BM pool with new buffer size */
	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
	if (port_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
		return -EIO;
	}

	port_pool->pkt_size = pkt_size;
	port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
		MVPP2_SKB_SHINFO_SIZE;
	num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
	if (num != pkts_num) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     port_pool->id, num, pkts_num);
		return -EIO;
	}

	mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
	dev->mtu = mtu;
	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}

/* Mask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id),
			   (MVPP2_CAUSE_MISC_SUM_MASK |
			    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
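
/* Both mask/unmask helpers above take a void * argument so they can run as
 * SMP cross-call callbacks; a caller that wants every CPU's Rx/Tx
 * interrupts masked would do something like (illustrative usage):
 *
 *	on_each_cpu(mvpp2_interrupts_mask, port, 1);
 */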

/* Port configuration routines */

static void mvpp22_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_CTRL3_REG);
		val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
		val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
		writel(val, port->base + MVPP22_XLG_CTRL3_REG);
	}

	val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
	if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
		val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
	else
		val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
	val &= ~MVPP22_CTRL4_DP_CLK_SEL;
	val |= MVPP22_CTRL4_SYNC_BYPASS;
	val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
}

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	if (port->priv->hw_version == MVPP22)
		mvpp22_port_mii_set(port);

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
		/* fall through: RGMII also clears the PCS enable bit */
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}

static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	      ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
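
/* Note on the arithmetic above: the GMAC max-RX-size field is programmed in
 * 2-byte units (hence the division by two), applied to the byte budget left
 * after subtracting the Marvell header. For example, with a hypothetical
 * pkt_size of 1518 and a 2-byte MH, the field would be (1518 - 2) / 2 = 758.
 */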
4316 | ||
4317 | /* Set defaults to the MVPP2 port */ | |
4318 | static void mvpp2_defaults_set(struct mvpp2_port *port) | |
4319 | { | |
4320 | int tx_port_num, val, queue, ptxq, lrxq; | |
4321 | ||
3d9017d9 TP |
4322 | if (port->priv->hw_version == MVPP21) { |
4323 | /* Configure port to loopback if needed */ | |
4324 | if (port->flags & MVPP2_F_LOOPBACK) | |
4325 | mvpp2_port_loopback_set(port); | |
4326 | ||
4327 | /* Update TX FIFO MIN Threshold */ | |
4328 | val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); | |
4329 | val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; | |
4330 | /* Min. TX threshold must be less than minimal packet length */ | |
4331 | val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); | |
4332 | writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); | |
4333 | } | |
3f518509 MW |
4334 | |
4335 | /* Disable Legacy WRR, Disable EJP, Release from reset */ | |
4336 | tx_port_num = mvpp2_egress_port(port); | |
4337 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, | |
4338 | tx_port_num); | |
4339 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); | |
4340 | ||
4341 | /* Close bandwidth for all queues */ | |
4342 | for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { | |
4343 | ptxq = mvpp2_txq_phys(port->id, queue); | |
4344 | mvpp2_write(port->priv, | |
4345 | MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); | |
4346 | } | |
4347 | ||
4348 | /* Set refill period to 1 usec, refill tokens | |
4349 | * and bucket size to maximum | |
4350 | */ | |
4351 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, | |
4352 | port->priv->tclk / USEC_PER_SEC); | |
4353 | val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); | |
4354 | val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; | |
4355 | val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); | |
4356 | val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; | |
4357 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); | |
4358 | val = MVPP2_TXP_TOKEN_SIZE_MAX; | |
4359 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); | |
4360 | ||
4361 | /* Set MaximumLowLatencyPacketSize value to 256 */ | |
4362 | mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), | |
4363 | MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | | |
4364 | MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); | |
4365 | ||
4366 | /* Enable Rx cache snoop */ | |
4367 | for (lrxq = 0; lrxq < rxq_number; lrxq++) { | |
4368 | queue = port->rxqs[lrxq]->id; | |
4369 | val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); | |
4370 | val |= MVPP2_SNOOP_PKT_SIZE_MASK | | |
4371 | MVPP2_SNOOP_BUF_HDR_MASK; | |
4372 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); | |
4373 | } | |
4374 | ||
4375 | /* By default, mask all interrupts on all present CPUs */ | |
4376 | mvpp2_interrupts_disable(port); | |
4377 | } | |
4378 | ||
4379 | /* Enable/disable receiving packets */ | |
4380 | static void mvpp2_ingress_enable(struct mvpp2_port *port) | |
4381 | { | |
4382 | u32 val; | |
4383 | int lrxq, queue; | |
4384 | ||
4385 | for (lrxq = 0; lrxq < rxq_number; lrxq++) { | |
4386 | queue = port->rxqs[lrxq]->id; | |
4387 | val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); | |
4388 | val &= ~MVPP2_RXQ_DISABLE_MASK; | |
4389 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); | |
4390 | } | |
4391 | } | |
4392 | ||
4393 | static void mvpp2_ingress_disable(struct mvpp2_port *port) | |
4394 | { | |
4395 | u32 val; | |
4396 | int lrxq, queue; | |
4397 | ||
4398 | for (lrxq = 0; lrxq < rxq_number; lrxq++) { | |
4399 | queue = port->rxqs[lrxq]->id; | |
4400 | val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); | |
4401 | val |= MVPP2_RXQ_DISABLE_MASK; | |
4402 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); | |
4403 | } | |
4404 | } | |
4405 | ||
4406 | /* Enable transmit via physical egress queue | |
4407 | * - HW starts to take descriptors from DRAM | |
4408 | */ | |
4409 | static void mvpp2_egress_enable(struct mvpp2_port *port) | |
4410 | { | |
4411 | u32 qmap; | |
4412 | int queue; | |
4413 | int tx_port_num = mvpp2_egress_port(port); | |
4414 | ||
4415 | /* Enable all initialized TXs. */ | |
4416 | qmap = 0; | |
4417 | for (queue = 0; queue < txq_number; queue++) { | |
4418 | struct mvpp2_tx_queue *txq = port->txqs[queue]; | |
4419 | ||
4420 | if (txq->descs != NULL) | |
4421 | qmap |= (1 << queue); | |
4422 | } | |
4423 | ||
4424 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); | |
4425 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); | |
4426 | } | |
4427 | ||
4428 | /* Disable transmit via physical egress queue | |
4429 | * - HW doesn't take descriptors from DRAM | |
4430 | */ | |
4431 | static void mvpp2_egress_disable(struct mvpp2_port *port) | |
4432 | { | |
4433 | u32 reg_data; | |
4434 | int delay; | |
4435 | int tx_port_num = mvpp2_egress_port(port); | |
4436 | ||
4437 | /* Issue stop command for active channels only */ | |
4438 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); | |
4439 | reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & | |
4440 | MVPP2_TXP_SCHED_ENQ_MASK; | |
4441 | if (reg_data != 0) | |
4442 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, | |
4443 | (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); | |
4444 | ||
4445 | /* Wait for all Tx activity to terminate. */ | |
4446 | delay = 0; | |
4447 | do { | |
4448 | if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { | |
4449 | netdev_warn(port->dev, | |
4450 | "Tx stop timed out, status=0x%08x\n", | |
4451 | reg_data); | |
4452 | break; | |
4453 | } | |
4454 | mdelay(1); | |
4455 | delay++; | |
4456 | ||
4457 | /* Check the port TX Command register to verify that | |
4458 | * all Tx queues are stopped | |
4459 | */ | |
4460 | reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); | |
4461 | } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); | |
4462 | } | |
4463 | ||
4464 | /* Rx descriptors helper methods */ | |
4465 | ||
4466 | /* Get number of Rx descriptors occupied by received packets */ | |
4467 | static inline int | |
4468 | mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) | |
4469 | { | |
4470 | u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); | |
4471 | ||
4472 | return val & MVPP2_RXQ_OCCUPIED_MASK; | |
4473 | } | |
4474 | ||
4475 | /* Update Rx queue status with the number of occupied and available | |
4476 | * Rx descriptor slots. | |
4477 | */ | |
4478 | static inline void | |
4479 | mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, | |
4480 | int used_count, int free_count) | |
4481 | { | |
4482 | /* Decrement the number of used descriptors and increment | |
4483 | * the number of free descriptors. | |
4484 | */ | |
4485 | u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); | |
4486 | ||
4487 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); | |
4488 | } | |
4489 | ||
4490 | /* Get pointer to next RX descriptor to be processed by SW */ | |
4491 | static inline struct mvpp2_rx_desc * | |
4492 | mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) | |
4493 | { | |
4494 | int rx_desc = rxq->next_desc_to_proc; | |
4495 | ||
4496 | rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); | |
4497 | prefetch(rxq->descs + rxq->next_desc_to_proc); | |
4498 | return rxq->descs + rx_desc; | |
4499 | } | |
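
/* Editor's note: MVPP2_QUEUE_NEXT_DESC is defined elsewhere in this
 * file; conceptually it wraps the ring index, i.e. something like
 *	next = (desc == q->last_desc) ? 0 : desc + 1;
 * so the prefetch above always warms the slot the next call returns.
 */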
4500 | ||
4501 | /* Set rx queue offset */ | |
4502 | static void mvpp2_rxq_offset_set(struct mvpp2_port *port, | |
4503 | int prxq, int offset) | |
4504 | { | |
4505 | u32 val; | |
4506 | ||
4507 | /* Convert offset from bytes to units of 32 bytes */ | |
4508 | offset = offset >> 5; | |
4509 | ||
4510 | val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); | |
4511 | val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; | |
4512 | ||
4513 | /* Offset is in units of 32 bytes */ | |
4514 | val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & | |
4515 | MVPP2_RXQ_PACKET_OFFSET_MASK); | |
4516 | ||
4517 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); | |
4518 | } | |
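
/* Editor's note: worked example. mvpp2_rxq_init() below passes
 * NET_SKB_PAD here; with a typical NET_SKB_PAD of 64 bytes the field is
 * programmed as 64 >> 5 = 2, i.e. two 32-byte units of headroom in
 * front of the received packet.
 */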
4519 | ||
4520 | /* Obtain BM cookie information from descriptor */ | |
ac3dd277 TP |
4521 | static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port, |
4522 | struct mvpp2_rx_desc *rx_desc) | |
3f518509 | 4523 | { |
3f518509 | 4524 | int cpu = smp_processor_id(); |
ac3dd277 TP |
4525 | int pool; |
4526 | ||
4527 | pool = (mvpp2_rxdesc_status_get(port, rx_desc) & | |
4528 | MVPP2_RXD_BM_POOL_ID_MASK) >> | |
4529 | MVPP2_RXD_BM_POOL_ID_OFFS; | |
3f518509 MW |
4530 | |
4531 | return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | | |
4532 | ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); | |
4533 | } | |
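
/* Editor's note: the cookie built above packs the BM pool id and the
 * CPU id into one u32. A matching decoder, using only the shifts named
 * above (their values are defined elsewhere in this file), would be:
 *
 *	pool = (bm >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
 *	cpu  = (bm >> MVPP2_BM_COOKIE_CPU_OFFS) & 0xFF;
 *
 * which is what mvpp2_bm_cookie_pool_get() does on the RX path below.
 */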
4534 | ||
4535 | /* Tx descriptors helper methods */ | |
4536 | ||
3f518509 MW |
4537 | /* Get pointer to next Tx descriptor to be processed (send) by HW */ |
4538 | static struct mvpp2_tx_desc * | |
4539 | mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) | |
4540 | { | |
4541 | int tx_desc = txq->next_desc_to_proc; | |
4542 | ||
4543 | txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); | |
4544 | return txq->descs + tx_desc; | |
4545 | } | |
4546 | ||
4547 | /* Update HW with number of aggregated Tx descriptors to be sent */ | |
4548 | static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) | |
4549 | { | |
4550 | /* aggregated access - relevant TXQ number is written in TX desc */ | |
a786841d TP |
4551 | mvpp2_percpu_write(port->priv, smp_processor_id(), |
4552 | MVPP2_AGGR_TXQ_UPDATE_REG, pending); | |
3f518509 MW |
4553 | } |
4554 | ||
4555 | ||
4556 | /* Check if there are enough free descriptors in aggregated txq. | |
4557 | * If not, update the number of occupied descriptors and repeat the check. | |
4558 | */ | |
4559 | static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, | |
4560 | struct mvpp2_tx_queue *aggr_txq, int num) | |
4561 | { | |
4562 | if ((aggr_txq->count + num) > aggr_txq->size) { | |
4563 | /* Update number of occupied aggregated Tx descriptors */ | |
4564 | int cpu = smp_processor_id(); | |
4565 | u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu)); | |
4566 | ||
4567 | aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; | |
4568 | } | |
4569 | ||
4570 | if ((aggr_txq->count + num) > aggr_txq->size) | |
4571 | return -ENOMEM; | |
4572 | ||
4573 | return 0; | |
4574 | } | |
4575 | ||
4576 | /* Reserved Tx descriptors allocation request */ | |
4577 | static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, | |
4578 | struct mvpp2_tx_queue *txq, int num) | |
4579 | { | |
4580 | u32 val; | |
a786841d | 4581 | int cpu = smp_processor_id(); |
3f518509 MW |
4582 | |
4583 | val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; | |
a786841d | 4584 | mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); |
3f518509 | 4585 | |
a786841d | 4586 | val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); |
3f518509 MW |
4587 | |
4588 | return val & MVPP2_TXQ_RSVD_RSLT_MASK; | |
4589 | } | |
4590 | ||
4591 | /* Check if there are enough reserved descriptors for transmission. | |
4592 | * If not, request chunk of reserved descriptors and check again. | |
4593 | */ | |
4594 | static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, | |
4595 | struct mvpp2_tx_queue *txq, | |
4596 | struct mvpp2_txq_pcpu *txq_pcpu, | |
4597 | int num) | |
4598 | { | |
4599 | int req, cpu, desc_count; | |
4600 | ||
4601 | if (txq_pcpu->reserved_num >= num) | |
4602 | return 0; | |
4603 | ||
4604 | /* Not enough descriptors reserved! Update the reserved descriptor | |
4605 | * count and check again. | |
4606 | */ | |
4607 | ||
4608 | desc_count = 0; | |
4609 | /* Compute total of used descriptors */ | |
4610 | for_each_present_cpu(cpu) { | |
4611 | struct mvpp2_txq_pcpu *txq_pcpu_aux; | |
4612 | ||
4613 | txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); | |
4614 | desc_count += txq_pcpu_aux->count; | |
4615 | desc_count += txq_pcpu_aux->reserved_num; | |
4616 | } | |
4617 | ||
4618 | req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); | |
4619 | desc_count += req; | |
4620 | ||
4621 | if (desc_count > | |
4622 | (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK))) | |
4623 | return -ENOMEM; | |
4624 | ||
4625 | txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); | |
4626 | ||
4627 | /* OK, the descriptor count has been updated: check again. */ | |
4628 | if (txq_pcpu->reserved_num < num) | |
4629 | return -ENOMEM; | |
4630 | return 0; | |
4631 | } | |
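
/* Editor's note: numeric sketch with assumed values (txq->size = 1024,
 * 4 present CPUs, MVPP2_CPU_DESC_CHUNK = 64; the real constants are
 * defined elsewhere in this file). The guard above refuses the request
 * once the projected total, including the newly requested chunk, would
 * exceed 1024 - 4 * 64 = 768, so every CPU can always still reserve one
 * full chunk of its own.
 */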
4632 | ||
4633 | /* Release the last allocated Tx descriptor. Useful to handle DMA | |
4634 | * mapping failures in the Tx path. | |
4635 | */ | |
4636 | static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) | |
4637 | { | |
4638 | if (txq->next_desc_to_proc == 0) | |
4639 | txq->next_desc_to_proc = txq->last_desc - 1; | |
4640 | else | |
4641 | txq->next_desc_to_proc--; | |
4642 | } | |
4643 | ||
4644 | /* Set Tx descriptors fields relevant for CSUM calculation */ | |
4645 | static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto, | |
4646 | int ip_hdr_len, int l4_proto) | |
4647 | { | |
4648 | u32 command; | |
4649 | ||
4650 | /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, | |
4651 | * G_L4_chk, L4_type required only for checksum calculation | |
4652 | */ | |
4653 | command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); | |
4654 | command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); | |
4655 | command |= MVPP2_TXD_IP_CSUM_DISABLE; | |
4656 | ||
4657 | if (l3_proto == swab16(ETH_P_IP)) { | |
4658 | command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ | |
4659 | command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ | |
4660 | } else { | |
4661 | command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ | |
4662 | } | |
4663 | ||
4664 | if (l4_proto == IPPROTO_TCP) { | |
4665 | command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ | |
4666 | command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ | |
4667 | } else if (l4_proto == IPPROTO_UDP) { | |
4668 | command |= MVPP2_TXD_L4_UDP; /* enable UDP */ | |
4669 | command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ | |
4670 | } else { | |
4671 | command |= MVPP2_TXD_L4_CSUM_NOT; | |
4672 | } | |
4673 | ||
4674 | return command; | |
4675 | } | |
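
/* Editor's note: illustrative call only; the real caller is
 * mvpp2_skb_tx_csum() further down. For a TCP/IPv4 skb with a 20-byte
 * IP header (ihl = 5) it effectively issues:
 *
 *	cmd = mvpp2_txq_desc_csum(skb_network_offset(skb),
 *				  skb->protocol, 5, IPPROTO_TCP);
 *
 * so the hardware generates both the IPv4 header checksum and the TCP
 * checksum for the descriptor.
 */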
4676 | ||
4677 | /* Get number of sent descriptors and decrement counter. | |
4678 | * The number of sent descriptors is returned. | |
4679 | * Per-CPU access | |
4680 | */ | |
4681 | static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, | |
4682 | struct mvpp2_tx_queue *txq) | |
4683 | { | |
4684 | u32 val; | |
4685 | ||
4686 | /* Reading status reg resets transmitted descriptor counter */ | |
a786841d TP |
4687 | val = mvpp2_percpu_read(port->priv, smp_processor_id(), |
4688 | MVPP2_TXQ_SENT_REG(txq->id)); | |
3f518509 MW |
4689 | |
4690 | return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> | |
4691 | MVPP2_TRANSMITTED_COUNT_OFFSET; | |
4692 | } | |
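
/* Editor's note: MVPP2_TXQ_SENT_REG is clear-on-read (see the comment
 * above), so the returned value must be consumed on the spot; this is
 * also why mvpp2_txq_sent_counter_clear() below reads the register
 * purely for its side effect and discards the result.
 */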
4693 | ||
4694 | static void mvpp2_txq_sent_counter_clear(void *arg) | |
4695 | { | |
4696 | struct mvpp2_port *port = arg; | |
4697 | int queue; | |
4698 | ||
4699 | for (queue = 0; queue < txq_number; queue++) { | |
4700 | int id = port->txqs[queue]->id; | |
4701 | ||
a786841d TP |
4702 | mvpp2_percpu_read(port->priv, smp_processor_id(), |
4703 | MVPP2_TXQ_SENT_REG(id)); | |
3f518509 MW |
4704 | } |
4705 | } | |
4706 | ||
4707 | /* Set max sizes for Tx queues */ | |
4708 | static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) | |
4709 | { | |
4710 | u32 val, size, mtu; | |
4711 | int txq, tx_port_num; | |
4712 | ||
4713 | mtu = port->pkt_size * 8; | |
4714 | if (mtu > MVPP2_TXP_MTU_MAX) | |
4715 | mtu = MVPP2_TXP_MTU_MAX; | |
4716 | ||
4717 | /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ | |
4718 | mtu = 3 * mtu; | |
4719 | ||
4720 | /* Indirect access to registers */ | |
4721 | tx_port_num = mvpp2_egress_port(port); | |
4722 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); | |
4723 | ||
4724 | /* Set MTU */ | |
4725 | val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); | |
4726 | val &= ~MVPP2_TXP_MTU_MAX; | |
4727 | val |= mtu; | |
4728 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); | |
4729 | ||
4730 | /* TXP token size and all TXQs token size must be larger than MTU */ | |
4731 | val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); | |
4732 | size = val & MVPP2_TXP_TOKEN_SIZE_MAX; | |
4733 | if (size < mtu) { | |
4734 | size = mtu; | |
4735 | val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; | |
4736 | val |= size; | |
4737 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); | |
4738 | } | |
4739 | ||
4740 | for (txq = 0; txq < txq_number; txq++) { | |
4741 | val = mvpp2_read(port->priv, | |
4742 | MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); | |
4743 | size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; | |
4744 | ||
4745 | if (size < mtu) { | |
4746 | size = mtu; | |
4747 | val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; | |
4748 | val |= size; | |
4749 | mvpp2_write(port->priv, | |
4750 | MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), | |
4751 | val); | |
4752 | } | |
4753 | } | |
4754 | } | |
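
/* Editor's note: worked example. The token-bucket MTU above appears to
 * be kept in bits (pkt_size * 8): for pkt_size = 1632 it starts at
 * 1632 * 8 = 13056, is left unclamped while below MVPP2_TXP_MTU_MAX,
 * and is then tripled to 39168 by the workaround before being written
 * and used as the floor for the TXP and per-TXQ token sizes.
 */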
4755 | ||
4756 | /* Set the number of packets that will be received before Rx interrupt | |
4757 | * will be generated by HW. | |
4758 | */ | |
4759 | static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, | |
d63f9e41 | 4760 | struct mvpp2_rx_queue *rxq) |
3f518509 | 4761 | { |
a786841d TP |
4762 | int cpu = smp_processor_id(); |
4763 | ||
f8b0d5f8 TP |
4764 | if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) |
4765 | rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; | |
3f518509 | 4766 | |
a786841d TP |
4767 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); |
4768 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, | |
4769 | rxq->pkts_coal); | |
3f518509 MW |
4770 | } |
4771 | ||
ab42676a TP |
4772 | static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) |
4773 | { | |
4774 | u64 tmp = (u64)clk_hz * usec; | |
4775 | ||
4776 | do_div(tmp, USEC_PER_SEC); | |
4777 | ||
4778 | return tmp > U32_MAX ? U32_MAX : tmp; | |
4779 | } | |
4780 | ||
4781 | static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) | |
4782 | { | |
4783 | u64 tmp = (u64)cycles * USEC_PER_SEC; | |
4784 | ||
4785 | do_div(tmp, clk_hz); | |
4786 | ||
4787 | return tmp > U32_MAX ? U32_MAX : tmp; | |
4788 | } | |
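
/* Editor's note: worked example. With tclk = 250 MHz, a 100 usec delay
 * converts to 250000000 * 100 / 1000000 = 25000 cycles, and 25000
 * cycles convert back to exactly 100 usec; both helpers saturate at
 * U32_MAX instead of wrapping on overflow.
 */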
4789 | ||
3f518509 MW |
4790 | /* Set the time delay in usec before Rx interrupt */ |
4791 | static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, | |
d63f9e41 | 4792 | struct mvpp2_rx_queue *rxq) |
3f518509 | 4793 | { |
ab42676a TP |
4794 | unsigned long freq = port->priv->tclk; |
4795 | u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); | |
4796 | ||
4797 | if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { | |
4798 | rxq->time_coal = | |
4799 | mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); | |
4800 | ||
4801 | /* re-evaluate to get actual register value */ | |
4802 | val = mvpp2_usec_to_cycles(rxq->time_coal, freq); | |
4803 | } | |
3f518509 | 4804 | |
3f518509 | 4805 | mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); |
3f518509 MW |
4806 | } |
4807 | ||
3f518509 MW |
4808 | /* Free Tx queue skbuffs */ |
4809 | static void mvpp2_txq_bufs_free(struct mvpp2_port *port, | |
4810 | struct mvpp2_tx_queue *txq, | |
4811 | struct mvpp2_txq_pcpu *txq_pcpu, int num) | |
4812 | { | |
4813 | int i; | |
4814 | ||
4815 | for (i = 0; i < num; i++) { | |
8354491c TP |
4816 | struct mvpp2_txq_pcpu_buf *tx_buf = |
4817 | txq_pcpu->buffs + txq_pcpu->txq_get_index; | |
3f518509 | 4818 | |
20396136 | 4819 | dma_unmap_single(port->dev->dev.parent, tx_buf->dma, |
8354491c | 4820 | tx_buf->size, DMA_TO_DEVICE); |
36fb7435 TP |
4821 | if (tx_buf->skb) |
4822 | dev_kfree_skb_any(tx_buf->skb); | |
4823 | ||
4824 | mvpp2_txq_inc_get(txq_pcpu); | |
3f518509 MW |
4825 | } |
4826 | } | |
4827 | ||
4828 | static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, | |
4829 | u32 cause) | |
4830 | { | |
4831 | int queue = fls(cause) - 1; | |
4832 | ||
4833 | return port->rxqs[queue]; | |
4834 | } | |
4835 | ||
4836 | static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, | |
4837 | u32 cause) | |
4838 | { | |
edc660fa | 4839 | int queue = fls(cause) - 1; |
3f518509 MW |
4840 | |
4841 | return port->txqs[queue]; | |
4842 | } | |
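
/* Editor's note: worked example. fls() is 1-based, so a cause word of
 * 0b0101 yields fls() - 1 = 2 and queue 2 is handled first; the callers
 * (mvpp2_tx_done() and mvpp2_poll()) then clear that bit and the next
 * pass selects queue 0.
 */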
4843 | ||
4844 | /* Handle end of transmission */ | |
4845 | static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, | |
4846 | struct mvpp2_txq_pcpu *txq_pcpu) | |
4847 | { | |
4848 | struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); | |
4849 | int tx_done; | |
4850 | ||
4851 | if (txq_pcpu->cpu != smp_processor_id()) | |
4852 | netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); | |
4853 | ||
4854 | tx_done = mvpp2_txq_sent_desc_proc(port, txq); | |
4855 | if (!tx_done) | |
4856 | return; | |
4857 | mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); | |
4858 | ||
4859 | txq_pcpu->count -= tx_done; | |
4860 | ||
4861 | if (netif_tx_queue_stopped(nq)) | |
4862 | if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1) | |
4863 | netif_tx_wake_queue(nq); | |
4864 | } | |
4865 | ||
edc660fa MW |
4866 | static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause) |
4867 | { | |
4868 | struct mvpp2_tx_queue *txq; | |
4869 | struct mvpp2_txq_pcpu *txq_pcpu; | |
4870 | unsigned int tx_todo = 0; | |
4871 | ||
4872 | while (cause) { | |
4873 | txq = mvpp2_get_tx_queue(port, cause); | |
4874 | if (!txq) | |
4875 | break; | |
4876 | ||
4877 | txq_pcpu = this_cpu_ptr(txq->pcpu); | |
4878 | ||
4879 | if (txq_pcpu->count) { | |
4880 | mvpp2_txq_done(port, txq, txq_pcpu); | |
4881 | tx_todo += txq_pcpu->count; | |
4882 | } | |
4883 | ||
4884 | cause &= ~(1 << txq->log_id); | |
4885 | } | |
4886 | return tx_todo; | |
4887 | } | |
4888 | ||
3f518509 MW |
4889 | /* Rx/Tx queue initialization/cleanup methods */ |
4890 | ||
4891 | /* Allocate and initialize descriptors for aggr TXQ */ | |
4892 | static int mvpp2_aggr_txq_init(struct platform_device *pdev, | |
4893 | struct mvpp2_tx_queue *aggr_txq, | |
4894 | int desc_num, int cpu, | |
4895 | struct mvpp2 *priv) | |
4896 | { | |
b02f31fb TP |
4897 | u32 txq_dma; |
4898 | ||
3f518509 MW |
4899 | /* Allocate memory for TX descriptors */ |
4900 | aggr_txq->descs = dma_alloc_coherent(&pdev->dev, | |
4901 | desc_num * MVPP2_DESC_ALIGNED_SIZE, | |
20396136 | 4902 | &aggr_txq->descs_dma, GFP_KERNEL); |
3f518509 MW |
4903 | if (!aggr_txq->descs) |
4904 | return -ENOMEM; | |
4905 | ||
3f518509 MW |
4906 | aggr_txq->last_desc = aggr_txq->size - 1; |
4907 | ||
4908 | /* Aggr TXQ is not reset by HW (no-reset WA): resync SW index from HW */ | |
4909 | aggr_txq->next_desc_to_proc = mvpp2_read(priv, | |
4910 | MVPP2_AGGR_TXQ_INDEX_REG(cpu)); | |
4911 | ||
b02f31fb TP |
4912 | /* Set Tx descriptors queue starting address - indirect | |
4913 | * access | |
4914 | */ | |
4915 | if (priv->hw_version == MVPP21) | |
4916 | txq_dma = aggr_txq->descs_dma; | |
4917 | else | |
4918 | txq_dma = aggr_txq->descs_dma >> | |
4919 | MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; | |
4920 | ||
4921 | mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); | |
3f518509 MW |
4922 | mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num); |
4923 | ||
4924 | return 0; | |
4925 | } | |
4926 | ||
4927 | /* Create a specified Rx queue */ | |
4928 | static int mvpp2_rxq_init(struct mvpp2_port *port, | |
4929 | struct mvpp2_rx_queue *rxq) | |
4930 | ||
4931 | { | |
b02f31fb | 4932 | u32 rxq_dma; |
a786841d | 4933 | int cpu; |
b02f31fb | 4934 | |
3f518509 MW |
4935 | rxq->size = port->rx_ring_size; |
4936 | ||
4937 | /* Allocate memory for RX descriptors */ | |
4938 | rxq->descs = dma_alloc_coherent(port->dev->dev.parent, | |
4939 | rxq->size * MVPP2_DESC_ALIGNED_SIZE, | |
20396136 | 4940 | &rxq->descs_dma, GFP_KERNEL); |
3f518509 MW |
4941 | if (!rxq->descs) |
4942 | return -ENOMEM; | |
4943 | ||
3f518509 MW |
4944 | rxq->last_desc = rxq->size - 1; |
4945 | ||
4946 | /* Zero occupied and non-occupied counters - direct access */ | |
4947 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); | |
4948 | ||
4949 | /* Set Rx descriptors queue starting address - indirect access */ | |
a786841d TP |
4950 | cpu = smp_processor_id(); |
4951 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); | |
b02f31fb TP |
4952 | if (port->priv->hw_version == MVPP21) |
4953 | rxq_dma = rxq->descs_dma; | |
4954 | else | |
4955 | rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; | |
a786841d TP |
4956 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); |
4957 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); | |
4958 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); | |
3f518509 MW |
4959 | |
4960 | /* Set Offset */ | |
4961 | mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); | |
4962 | ||
4963 | /* Set coalescing pkts and time */ | |
d63f9e41 TP |
4964 | mvpp2_rx_pkts_coal_set(port, rxq); |
4965 | mvpp2_rx_time_coal_set(port, rxq); | |
3f518509 MW |
4966 | |
4967 | /* Add number of descriptors ready for receiving packets */ | |
4968 | mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); | |
4969 | ||
4970 | return 0; | |
4971 | } | |
4972 | ||
4973 | /* Push packets received by the RXQ to BM pool */ | |
4974 | static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, | |
4975 | struct mvpp2_rx_queue *rxq) | |
4976 | { | |
4977 | int rx_received, i; | |
4978 | ||
4979 | rx_received = mvpp2_rxq_received(port, rxq->id); | |
4980 | if (!rx_received) | |
4981 | return; | |
4982 | ||
4983 | for (i = 0; i < rx_received; i++) { | |
4984 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); | |
ac3dd277 | 4985 | u32 bm = mvpp2_bm_cookie_build(port, rx_desc); |
3f518509 | 4986 | |
ac3dd277 TP |
4987 | mvpp2_pool_refill(port, bm, |
4988 | mvpp2_rxdesc_dma_addr_get(port, rx_desc), | |
4989 | mvpp2_rxdesc_cookie_get(port, rx_desc)); | |
3f518509 MW |
4990 | } |
4991 | mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); | |
4992 | } | |
4993 | ||
4994 | /* Cleanup Rx queue */ | |
4995 | static void mvpp2_rxq_deinit(struct mvpp2_port *port, | |
4996 | struct mvpp2_rx_queue *rxq) | |
4997 | { | |
a786841d TP |
4998 | int cpu; |
4999 | ||
3f518509 MW |
5000 | mvpp2_rxq_drop_pkts(port, rxq); |
5001 | ||
5002 | if (rxq->descs) | |
5003 | dma_free_coherent(port->dev->dev.parent, | |
5004 | rxq->size * MVPP2_DESC_ALIGNED_SIZE, | |
5005 | rxq->descs, | |
20396136 | 5006 | rxq->descs_dma); |
3f518509 MW |
5007 | |
5008 | rxq->descs = NULL; | |
5009 | rxq->last_desc = 0; | |
5010 | rxq->next_desc_to_proc = 0; | |
20396136 | 5011 | rxq->descs_dma = 0; |
3f518509 MW |
5012 | |
5013 | /* Clear Rx descriptors queue starting address and size; | |
5014 | * and the free descriptor number | |
5015 | */ | |
5016 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); | |
a786841d TP |
5017 | cpu = smp_processor_id(); |
5018 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); | |
5019 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); | |
5020 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); | |
3f518509 MW |
5021 | } |
5022 | ||
5023 | /* Create and initialize a Tx queue */ | |
5024 | static int mvpp2_txq_init(struct mvpp2_port *port, | |
5025 | struct mvpp2_tx_queue *txq) | |
5026 | { | |
5027 | u32 val; | |
5028 | int cpu, desc, desc_per_txq, tx_port_num; | |
5029 | struct mvpp2_txq_pcpu *txq_pcpu; | |
5030 | ||
5031 | txq->size = port->tx_ring_size; | |
5032 | ||
5033 | /* Allocate memory for Tx descriptors */ | |
5034 | txq->descs = dma_alloc_coherent(port->dev->dev.parent, | |
5035 | txq->size * MVPP2_DESC_ALIGNED_SIZE, | |
20396136 | 5036 | &txq->descs_dma, GFP_KERNEL); |
3f518509 MW |
5037 | if (!txq->descs) |
5038 | return -ENOMEM; | |
5039 | ||
3f518509 MW |
5040 | txq->last_desc = txq->size - 1; |
5041 | ||
5042 | /* Set Tx descriptors queue starting address - indirect access */ | |
a786841d TP |
5043 | cpu = smp_processor_id(); |
5044 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); | |
5045 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, | |
5046 | txq->descs_dma); | |
5047 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, | |
5048 | txq->size & MVPP2_TXQ_DESC_SIZE_MASK); | |
5049 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); | |
5050 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, | |
5051 | txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); | |
5052 | val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); | |
3f518509 | 5053 | val &= ~MVPP2_TXQ_PENDING_MASK; |
a786841d | 5054 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); |
3f518509 MW |
5055 | |
5056 | /* Calculate base address in prefetch buffer. We reserve 16 descriptors | |
5057 | * for each existing TXQ. | |
5058 | * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT | |
5059 | * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS | |
5060 | */ | |
5061 | desc_per_txq = 16; | |
5062 | desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + | |
5063 | (txq->log_id * desc_per_txq); | |
5064 | ||
a786841d TP |
5065 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, |
5066 | MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | | |
5067 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); | |
3f518509 MW |
5068 | |
5069 | /* WRR / EJP configuration - indirect access */ | |
5070 | tx_port_num = mvpp2_egress_port(port); | |
5071 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); | |
5072 | ||
5073 | val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); | |
5074 | val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; | |
5075 | val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); | |
5076 | val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; | |
5077 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); | |
5078 | ||
5079 | val = MVPP2_TXQ_TOKEN_SIZE_MAX; | |
5080 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), | |
5081 | val); | |
5082 | ||
5083 | for_each_present_cpu(cpu) { | |
5084 | txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); | |
5085 | txq_pcpu->size = txq->size; | |
02c91ece ME |
5086 | txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, |
5087 | sizeof(*txq_pcpu->buffs), | |
5088 | GFP_KERNEL); | |
8354491c | 5089 | if (!txq_pcpu->buffs) |
71ce391d | 5090 | goto error; |
3f518509 MW |
5091 | |
5092 | txq_pcpu->count = 0; | |
5093 | txq_pcpu->reserved_num = 0; | |
5094 | txq_pcpu->txq_put_index = 0; | |
5095 | txq_pcpu->txq_get_index = 0; | |
5096 | } | |
5097 | ||
5098 | return 0; | |
71ce391d MW |
5099 | |
5100 | error: | |
5101 | for_each_present_cpu(cpu) { | |
5102 | txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); | |
8354491c | 5103 | kfree(txq_pcpu->buffs); |
71ce391d MW |
5104 | } |
5105 | ||
5106 | dma_free_coherent(port->dev->dev.parent, | |
5107 | txq->size * MVPP2_DESC_ALIGNED_SIZE, | |
20396136 | 5108 | txq->descs, txq->descs_dma); |
71ce391d MW |
5109 | |
5110 | return -ENOMEM; | |
3f518509 MW |
5111 | } |
5112 | ||
5113 | /* Free allocated TXQ resources */ | |
5114 | static void mvpp2_txq_deinit(struct mvpp2_port *port, | |
5115 | struct mvpp2_tx_queue *txq) | |
5116 | { | |
5117 | struct mvpp2_txq_pcpu *txq_pcpu; | |
5118 | int cpu; | |
5119 | ||
5120 | for_each_present_cpu(cpu) { | |
5121 | txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); | |
8354491c | 5122 | kfree(txq_pcpu->buffs); |
3f518509 MW |
5123 | } |
5124 | ||
5125 | if (txq->descs) | |
5126 | dma_free_coherent(port->dev->dev.parent, | |
5127 | txq->size * MVPP2_DESC_ALIGNED_SIZE, | |
20396136 | 5128 | txq->descs, txq->descs_dma); |
3f518509 MW |
5129 | |
5130 | txq->descs = NULL; | |
5131 | txq->last_desc = 0; | |
5132 | txq->next_desc_to_proc = 0; | |
20396136 | 5133 | txq->descs_dma = 0; |
3f518509 MW |
5134 | |
5135 | /* Set minimum bandwidth for disabled TXQs */ | |
5136 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); | |
5137 | ||
5138 | /* Set Tx descriptors queue starting address and size */ | |
a786841d TP |
5139 | cpu = smp_processor_id(); |
5140 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); | |
5141 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); | |
5142 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); | |
3f518509 MW |
5143 | } |
5144 | ||
5145 | /* Cleanup Tx ports */ | |
5146 | static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) | |
5147 | { | |
5148 | struct mvpp2_txq_pcpu *txq_pcpu; | |
5149 | int delay, pending, cpu; | |
5150 | u32 val; | |
5151 | ||
a786841d TP |
5152 | cpu = smp_processor_id(); |
5153 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); | |
5154 | val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); | |
3f518509 | 5155 | val |= MVPP2_TXQ_DRAIN_EN_MASK; |
a786841d | 5156 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); |
3f518509 MW |
5157 | |
5158 | /* The napi queue has been stopped so wait for all packets | |
5159 | * to be transmitted. | |
5160 | */ | |
5161 | delay = 0; | |
5162 | do { | |
5163 | if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { | |
5164 | netdev_warn(port->dev, | |
5165 | "port %d: cleaning queue %d timed out\n", | |
5166 | port->id, txq->log_id); | |
5167 | break; | |
5168 | } | |
5169 | mdelay(1); | |
5170 | delay++; | |
5171 | ||
a786841d TP |
5172 | pending = mvpp2_percpu_read(port->priv, cpu, |
5173 | MVPP2_TXQ_PENDING_REG); | |
5174 | pending &= MVPP2_TXQ_PENDING_MASK; | |
3f518509 MW |
5175 | } while (pending); |
5176 | ||
5177 | val &= ~MVPP2_TXQ_DRAIN_EN_MASK; | |
a786841d | 5178 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); |
3f518509 MW |
5179 | |
5180 | for_each_present_cpu(cpu) { | |
5181 | txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); | |
5182 | ||
5183 | /* Release all packets */ | |
5184 | mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); | |
5185 | ||
5186 | /* Reset queue */ | |
5187 | txq_pcpu->count = 0; | |
5188 | txq_pcpu->txq_put_index = 0; | |
5189 | txq_pcpu->txq_get_index = 0; | |
5190 | } | |
5191 | } | |
5192 | ||
5193 | /* Cleanup all Tx queues */ | |
5194 | static void mvpp2_cleanup_txqs(struct mvpp2_port *port) | |
5195 | { | |
5196 | struct mvpp2_tx_queue *txq; | |
5197 | int queue; | |
5198 | u32 val; | |
5199 | ||
5200 | val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); | |
5201 | ||
5202 | /* Reset Tx ports and delete Tx queues */ | |
5203 | val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); | |
5204 | mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); | |
5205 | ||
5206 | for (queue = 0; queue < txq_number; queue++) { | |
5207 | txq = port->txqs[queue]; | |
5208 | mvpp2_txq_clean(port, txq); | |
5209 | mvpp2_txq_deinit(port, txq); | |
5210 | } | |
5211 | ||
5212 | on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); | |
5213 | ||
5214 | val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); | |
5215 | mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); | |
5216 | } | |
5217 | ||
5218 | /* Cleanup all Rx queues */ | |
5219 | static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) | |
5220 | { | |
5221 | int queue; | |
5222 | ||
5223 | for (queue = 0; queue < rxq_number; queue++) | |
5224 | mvpp2_rxq_deinit(port, port->rxqs[queue]); | |
5225 | } | |
5226 | ||
5227 | /* Init all Rx queues for port */ | |
5228 | static int mvpp2_setup_rxqs(struct mvpp2_port *port) | |
5229 | { | |
5230 | int queue, err; | |
5231 | ||
5232 | for (queue = 0; queue < rxq_number; queue++) { | |
5233 | err = mvpp2_rxq_init(port, port->rxqs[queue]); | |
5234 | if (err) | |
5235 | goto err_cleanup; | |
5236 | } | |
5237 | return 0; | |
5238 | ||
5239 | err_cleanup: | |
5240 | mvpp2_cleanup_rxqs(port); | |
5241 | return err; | |
5242 | } | |
5243 | ||
5244 | /* Init all tx queues for port */ | |
5245 | static int mvpp2_setup_txqs(struct mvpp2_port *port) | |
5246 | { | |
5247 | struct mvpp2_tx_queue *txq; | |
5248 | int queue, err; | |
5249 | ||
5250 | for (queue = 0; queue < txq_number; queue++) { | |
5251 | txq = port->txqs[queue]; | |
5252 | err = mvpp2_txq_init(port, txq); | |
5253 | if (err) | |
5254 | goto err_cleanup; | |
5255 | } | |
5256 | ||
3f518509 MW |
5257 | on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); |
5258 | return 0; | |
5259 | ||
5260 | err_cleanup: | |
5261 | mvpp2_cleanup_txqs(port); | |
5262 | return err; | |
5263 | } | |
5264 | ||
5265 | /* The callback for per-port interrupt */ | |
5266 | static irqreturn_t mvpp2_isr(int irq, void *dev_id) | |
5267 | { | |
5268 | struct mvpp2_port *port = (struct mvpp2_port *)dev_id; | |
5269 | ||
5270 | mvpp2_interrupts_disable(port); | |
5271 | ||
5272 | napi_schedule(&port->napi); | |
5273 | ||
5274 | return IRQ_HANDLED; | |
5275 | } | |
5276 | ||
5277 | /* Adjust link */ | |
5278 | static void mvpp2_link_event(struct net_device *dev) | |
5279 | { | |
5280 | struct mvpp2_port *port = netdev_priv(dev); | |
8e07269d | 5281 | struct phy_device *phydev = dev->phydev; |
3f518509 MW |
5282 | int status_change = 0; |
5283 | u32 val; | |
5284 | ||
5285 | if (phydev->link) { | |
5286 | if ((port->speed != phydev->speed) || | |
5287 | (port->duplex != phydev->duplex)) { | |
5288 | u32 val; | |
5289 | ||
5290 | val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
5291 | val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | | |
5292 | MVPP2_GMAC_CONFIG_GMII_SPEED | | |
5293 | MVPP2_GMAC_CONFIG_FULL_DUPLEX | | |
5294 | MVPP2_GMAC_AN_SPEED_EN | | |
5295 | MVPP2_GMAC_AN_DUPLEX_EN); | |
5296 | ||
5297 | if (phydev->duplex) | |
5298 | val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; | |
5299 | ||
5300 | if (phydev->speed == SPEED_1000) | |
5301 | val |= MVPP2_GMAC_CONFIG_GMII_SPEED; | |
2add511e | 5302 | else if (phydev->speed == SPEED_100) |
3f518509 MW |
5303 | val |= MVPP2_GMAC_CONFIG_MII_SPEED; |
5304 | ||
5305 | writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
5306 | ||
5307 | port->duplex = phydev->duplex; | |
5308 | port->speed = phydev->speed; | |
5309 | } | |
5310 | } | |
5311 | ||
5312 | if (phydev->link != port->link) { | |
5313 | if (!phydev->link) { | |
5314 | port->duplex = -1; | |
5315 | port->speed = 0; | |
5316 | } | |
5317 | ||
5318 | port->link = phydev->link; | |
5319 | status_change = 1; | |
5320 | } | |
5321 | ||
5322 | if (status_change) { | |
5323 | if (phydev->link) { | |
5324 | val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
5325 | val |= (MVPP2_GMAC_FORCE_LINK_PASS | | |
5326 | MVPP2_GMAC_FORCE_LINK_DOWN); | |
5327 | writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
5328 | mvpp2_egress_enable(port); | |
5329 | mvpp2_ingress_enable(port); | |
5330 | } else { | |
5331 | mvpp2_ingress_disable(port); | |
5332 | mvpp2_egress_disable(port); | |
5333 | } | |
5334 | phy_print_status(phydev); | |
5335 | } | |
5336 | } | |
5337 | ||
edc660fa MW |
5338 | static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) |
5339 | { | |
5340 | ktime_t interval; | |
5341 | ||
5342 | if (!port_pcpu->timer_scheduled) { | |
5343 | port_pcpu->timer_scheduled = true; | |
8b0e1953 | 5344 | interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS; |
edc660fa MW |
5345 | hrtimer_start(&port_pcpu->tx_done_timer, interval, |
5346 | HRTIMER_MODE_REL_PINNED); | |
5347 | } | |
5348 | } | |
5349 | ||
5350 | static void mvpp2_tx_proc_cb(unsigned long data) | |
5351 | { | |
5352 | struct net_device *dev = (struct net_device *)data; | |
5353 | struct mvpp2_port *port = netdev_priv(dev); | |
5354 | struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); | |
5355 | unsigned int tx_todo, cause; | |
5356 | ||
5357 | if (!netif_running(dev)) | |
5358 | return; | |
5359 | port_pcpu->timer_scheduled = false; | |
5360 | ||
5361 | /* Process all the Tx queues */ | |
5362 | cause = (1 << txq_number) - 1; | |
5363 | tx_todo = mvpp2_tx_done(port, cause); | |
5364 | ||
5365 | /* Set the timer in case not all the packets were processed */ | |
5366 | if (tx_todo) | |
5367 | mvpp2_timer_set(port_pcpu); | |
5368 | } | |
5369 | ||
5370 | static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) | |
5371 | { | |
5372 | struct mvpp2_port_pcpu *port_pcpu = container_of(timer, | |
5373 | struct mvpp2_port_pcpu, | |
5374 | tx_done_timer); | |
5375 | ||
5376 | tasklet_schedule(&port_pcpu->tx_done_tasklet); | |
5377 | ||
5378 | return HRTIMER_NORESTART; | |
5379 | } | |
5380 | ||
3f518509 MW |
5381 | /* Main RX/TX processing routines */ |
5382 | ||
5383 | /* Display more error info */ | |
5384 | static void mvpp2_rx_error(struct mvpp2_port *port, | |
5385 | struct mvpp2_rx_desc *rx_desc) | |
5386 | { | |
ac3dd277 TP |
5387 | u32 status = mvpp2_rxdesc_status_get(port, rx_desc); |
5388 | size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); | |
3f518509 MW |
5389 | |
5390 | switch (status & MVPP2_RXD_ERR_CODE_MASK) { | |
5391 | case MVPP2_RXD_ERR_CRC: | |
ac3dd277 TP |
5392 | netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n", |
5393 | status, sz); | |
3f518509 MW |
5394 | break; |
5395 | case MVPP2_RXD_ERR_OVERRUN: | |
ac3dd277 TP |
5396 | netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n", |
5397 | status, sz); | |
3f518509 MW |
5398 | break; |
5399 | case MVPP2_RXD_ERR_RESOURCE: | |
ac3dd277 TP |
5400 | netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n", |
5401 | status, sz); | |
3f518509 MW |
5402 | break; |
5403 | } | |
5404 | } | |
5405 | ||
5406 | /* Handle RX checksum offload */ | |
5407 | static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, | |
5408 | struct sk_buff *skb) | |
5409 | { | |
5410 | if (((status & MVPP2_RXD_L3_IP4) && | |
5411 | !(status & MVPP2_RXD_IP4_HEADER_ERR)) || | |
5412 | (status & MVPP2_RXD_L3_IP6)) | |
5413 | if (((status & MVPP2_RXD_L4_UDP) || | |
5414 | (status & MVPP2_RXD_L4_TCP)) && | |
5415 | (status & MVPP2_RXD_L4_CSUM_OK)) { | |
5416 | skb->csum = 0; | |
5417 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
5418 | return; | |
5419 | } | |
5420 | ||
5421 | skb->ip_summed = CHECKSUM_NONE; | |
5422 | } | |
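
/* Editor's note: summary of the rule above. CHECKSUM_UNNECESSARY is
 * set only when (IPv4 without a header error, or IPv6) and (TCP or
 * UDP) and MVPP2_RXD_L4_CSUM_OK all hold; every other combination
 * falls through to CHECKSUM_NONE and the stack verifies the checksum
 * itself.
 */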
5423 | ||
5424 | /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ | |
5425 | static int mvpp2_rx_refill(struct mvpp2_port *port, | |
7ef7e1d9 | 5426 | struct mvpp2_bm_pool *bm_pool, u32 bm) |
3f518509 | 5427 | { |
20396136 | 5428 | dma_addr_t dma_addr; |
4e4a105f | 5429 | phys_addr_t phys_addr; |
0e037281 | 5430 | void *buf; |
3f518509 | 5431 | |
3f518509 | 5432 | /* No recycle or too many buffers are in use, so allocate a new skb */ |
4e4a105f TP |
5433 | buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr, |
5434 | GFP_ATOMIC); | |
0e037281 | 5435 | if (!buf) |
3f518509 MW |
5436 | return -ENOMEM; |
5437 | ||
4e4a105f | 5438 | mvpp2_pool_refill(port, bm, dma_addr, phys_addr); |
7ef7e1d9 | 5439 | |
3f518509 MW |
5440 | return 0; |
5441 | } | |
5442 | ||
5443 | /* Handle tx checksum */ | |
5444 | static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) | |
5445 | { | |
5446 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
5447 | int ip_hdr_len = 0; | |
5448 | u8 l4_proto; | |
5449 | ||
5450 | if (skb->protocol == htons(ETH_P_IP)) { | |
5451 | struct iphdr *ip4h = ip_hdr(skb); | |
5452 | ||
5453 | /* Calculate IPv4 checksum and L4 checksum */ | |
5454 | ip_hdr_len = ip4h->ihl; | |
5455 | l4_proto = ip4h->protocol; | |
5456 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | |
5457 | struct ipv6hdr *ip6h = ipv6_hdr(skb); | |
5458 | ||
5459 | /* Read l4_protocol from one of IPv6 extra headers */ | |
5460 | if (skb_network_header_len(skb) > 0) | |
5461 | ip_hdr_len = (skb_network_header_len(skb) >> 2); | |
5462 | l4_proto = ip6h->nexthdr; | |
5463 | } else { | |
5464 | return MVPP2_TXD_L4_CSUM_NOT; | |
5465 | } | |
5466 | ||
5467 | return mvpp2_txq_desc_csum(skb_network_offset(skb), | |
5468 | skb->protocol, ip_hdr_len, l4_proto); | |
5469 | } | |
5470 | ||
5471 | return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; | |
5472 | } | |
5473 | ||
3f518509 MW |
5474 | /* Main rx processing */ |
5475 | static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |
5476 | struct mvpp2_rx_queue *rxq) | |
5477 | { | |
5478 | struct net_device *dev = port->dev; | |
b5015854 MW |
5479 | int rx_received; |
5480 | int rx_done = 0; | |
3f518509 MW |
5481 | u32 rcvd_pkts = 0; |
5482 | u32 rcvd_bytes = 0; | |
5483 | ||
5484 | /* Get number of received packets and clamp the to-do */ | |
5485 | rx_received = mvpp2_rxq_received(port, rxq->id); | |
5486 | if (rx_todo > rx_received) | |
5487 | rx_todo = rx_received; | |
5488 | ||
b5015854 | 5489 | while (rx_done < rx_todo) { |
3f518509 MW |
5490 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); |
5491 | struct mvpp2_bm_pool *bm_pool; | |
5492 | struct sk_buff *skb; | |
0e037281 | 5493 | unsigned int frag_size; |
20396136 | 5494 | dma_addr_t dma_addr; |
ac3dd277 | 5495 | phys_addr_t phys_addr; |
3f518509 MW |
5496 | u32 bm, rx_status; |
5497 | int pool, rx_bytes, err; | |
0e037281 | 5498 | void *data; |
3f518509 | 5499 | |
b5015854 | 5500 | rx_done++; |
ac3dd277 TP |
5501 | rx_status = mvpp2_rxdesc_status_get(port, rx_desc); |
5502 | rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); | |
5503 | rx_bytes -= MVPP2_MH_SIZE; | |
5504 | dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); | |
5505 | phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); | |
5506 | data = (void *)phys_to_virt(phys_addr); | |
5507 | ||
5508 | bm = mvpp2_bm_cookie_build(port, rx_desc); | |
3f518509 MW |
5509 | pool = mvpp2_bm_cookie_pool_get(bm); |
5510 | bm_pool = &port->priv->bm_pools[pool]; | |
3f518509 MW |
5511 | |
5512 | /* In case of an error, release the requested buffer pointer | |
5513 | * to the Buffer Manager. This request process is controlled | |
5514 | * by the hardware, and the information about the buffer is | |
5515 | * carried in the RX descriptor. | |
5516 | */ | |
5517 | if (rx_status & MVPP2_RXD_ERR_SUMMARY) { | |
b5015854 | 5518 | err_drop_frame: |
3f518509 MW |
5519 | dev->stats.rx_errors++; |
5520 | mvpp2_rx_error(port, rx_desc); | |
b5015854 | 5521 | /* Return the buffer to the pool */ |
ac3dd277 | 5522 | mvpp2_pool_refill(port, bm, dma_addr, phys_addr); |
3f518509 MW |
5523 | continue; |
5524 | } | |
5525 | ||
0e037281 TP |
5526 | if (bm_pool->frag_size > PAGE_SIZE) |
5527 | frag_size = 0; | |
5528 | else | |
5529 | frag_size = bm_pool->frag_size; | |
5530 | ||
5531 | skb = build_skb(data, frag_size); | |
5532 | if (!skb) { | |
5533 | netdev_warn(port->dev, "skb build failed\n"); | |
5534 | goto err_drop_frame; | |
5535 | } | |
3f518509 | 5536 | |
7ef7e1d9 | 5537 | err = mvpp2_rx_refill(port, bm_pool, bm); |
b5015854 MW |
5538 | if (err) { |
5539 | netdev_err(port->dev, "failed to refill BM pools\n"); | |
5540 | goto err_drop_frame; | |
5541 | } | |
5542 | ||
20396136 | 5543 | dma_unmap_single(dev->dev.parent, dma_addr, |
4229d502 MW |
5544 | bm_pool->buf_size, DMA_FROM_DEVICE); |
5545 | ||
3f518509 MW |
5546 | rcvd_pkts++; |
5547 | rcvd_bytes += rx_bytes; | |
3f518509 | 5548 | |
0e037281 | 5549 | skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD); |
3f518509 MW |
5550 | skb_put(skb, rx_bytes); |
5551 | skb->protocol = eth_type_trans(skb, dev); | |
5552 | mvpp2_rx_csum(port, rx_status, skb); | |
5553 | ||
5554 | napi_gro_receive(&port->napi, skb); | |
3f518509 MW |
5555 | } |
5556 | ||
5557 | if (rcvd_pkts) { | |
5558 | struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); | |
5559 | ||
5560 | u64_stats_update_begin(&stats->syncp); | |
5561 | stats->rx_packets += rcvd_pkts; | |
5562 | stats->rx_bytes += rcvd_bytes; | |
5563 | u64_stats_update_end(&stats->syncp); | |
5564 | } | |
5565 | ||
5566 | /* Update Rx queue management counters */ | |
5567 | wmb(); | |
b5015854 | 5568 | mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); |
3f518509 MW |
5569 | |
5570 | return rx_todo; | |
5571 | } | |
5572 | ||
5573 | static inline void | |
ac3dd277 | 5574 | tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, |
3f518509 MW |
5575 | struct mvpp2_tx_desc *desc) |
5576 | { | |
ac3dd277 TP |
5577 | dma_addr_t buf_dma_addr = |
5578 | mvpp2_txdesc_dma_addr_get(port, desc); | |
5579 | size_t buf_sz = | |
5580 | mvpp2_txdesc_size_get(port, desc); | |
5581 | dma_unmap_single(port->dev->dev.parent, buf_dma_addr, | |
5582 | buf_sz, DMA_TO_DEVICE); | |
3f518509 MW |
5583 | mvpp2_txq_desc_put(txq); |
5584 | } | |
5585 | ||
5586 | /* Handle tx fragmentation processing */ | |
5587 | static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, | |
5588 | struct mvpp2_tx_queue *aggr_txq, | |
5589 | struct mvpp2_tx_queue *txq) | |
5590 | { | |
5591 | struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); | |
5592 | struct mvpp2_tx_desc *tx_desc; | |
5593 | int i; | |
20396136 | 5594 | dma_addr_t buf_dma_addr; |
3f518509 MW |
5595 | |
5596 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
5597 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
5598 | void *addr = page_address(frag->page.p) + frag->page_offset; | |
5599 | ||
5600 | tx_desc = mvpp2_txq_next_desc_get(aggr_txq); | |
ac3dd277 TP |
5601 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); |
5602 | mvpp2_txdesc_size_set(port, tx_desc, frag->size); | |
3f518509 | 5603 | |
20396136 | 5604 | buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, |
ac3dd277 TP |
5605 | frag->size, |
5606 | DMA_TO_DEVICE); | |
20396136 | 5607 | if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { |
3f518509 MW |
5608 | mvpp2_txq_desc_put(txq); |
5609 | goto error; | |
5610 | } | |
5611 | ||
ac3dd277 TP |
5612 | mvpp2_txdesc_offset_set(port, tx_desc, |
5613 | buf_dma_addr & MVPP2_TX_DESC_ALIGN); | |
5614 | mvpp2_txdesc_dma_addr_set(port, tx_desc, | |
5615 | buf_dma_addr & ~MVPP2_TX_DESC_ALIGN); | |
3f518509 MW |
5616 | |
5617 | if (i == (skb_shinfo(skb)->nr_frags - 1)) { | |
5618 | /* Last descriptor */ | |
ac3dd277 TP |
5619 | mvpp2_txdesc_cmd_set(port, tx_desc, |
5620 | MVPP2_TXD_L_DESC); | |
5621 | mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); | |
3f518509 MW |
5622 | } else { |
5623 | /* Descriptor in the middle: Not First, Not Last */ | |
ac3dd277 TP |
5624 | mvpp2_txdesc_cmd_set(port, tx_desc, 0); |
5625 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); | |
3f518509 MW |
5626 | } |
5627 | } | |
5628 | ||
5629 | return 0; | |
5630 | ||
5631 | error: | |
5632 | /* Release all descriptors that were used to map fragments of | |
5633 | * this packet, as well as the corresponding DMA mappings | |
5634 | */ | |
5635 | for (i = i - 1; i >= 0; i--) { | |
5636 | tx_desc = txq->descs + i; | |
ac3dd277 | 5637 | tx_desc_unmap_put(port, txq, tx_desc); |
3f518509 MW |
5638 | } |
5639 | ||
5640 | return -ENOMEM; | |
5641 | } | |
5642 | ||
5643 | /* Main tx processing */ | |
5644 | static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) | |
5645 | { | |
5646 | struct mvpp2_port *port = netdev_priv(dev); | |
5647 | struct mvpp2_tx_queue *txq, *aggr_txq; | |
5648 | struct mvpp2_txq_pcpu *txq_pcpu; | |
5649 | struct mvpp2_tx_desc *tx_desc; | |
20396136 | 5650 | dma_addr_t buf_dma_addr; |
3f518509 MW |
5651 | int frags = 0; |
5652 | u16 txq_id; | |
5653 | u32 tx_cmd; | |
5654 | ||
5655 | txq_id = skb_get_queue_mapping(skb); | |
5656 | txq = port->txqs[txq_id]; | |
5657 | txq_pcpu = this_cpu_ptr(txq->pcpu); | |
5658 | aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; | |
5659 | ||
5660 | frags = skb_shinfo(skb)->nr_frags + 1; | |
5661 | ||
5662 | /* Check number of available descriptors */ | |
5663 | if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || | |
5664 | mvpp2_txq_reserved_desc_num_proc(port->priv, txq, | |
5665 | txq_pcpu, frags)) { | |
5666 | frags = 0; | |
5667 | goto out; | |
5668 | } | |
5669 | ||
5670 | /* Get a descriptor for the first part of the packet */ | |
5671 | tx_desc = mvpp2_txq_next_desc_get(aggr_txq); | |
ac3dd277 TP |
5672 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); |
5673 | mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); | |
3f518509 | 5674 | |
20396136 | 5675 | buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, |
ac3dd277 | 5676 | skb_headlen(skb), DMA_TO_DEVICE); |
20396136 | 5677 | if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { |
3f518509 MW |
5678 | mvpp2_txq_desc_put(txq); |
5679 | frags = 0; | |
5680 | goto out; | |
5681 | } | |
ac3dd277 TP |
5682 | |
5683 | mvpp2_txdesc_offset_set(port, tx_desc, | |
5684 | buf_dma_addr & MVPP2_TX_DESC_ALIGN); | |
5685 | mvpp2_txdesc_dma_addr_set(port, tx_desc, | |
5686 | buf_dma_addr & ~MVPP2_TX_DESC_ALIGN); | |
3f518509 MW |
5687 | |
5688 | tx_cmd = mvpp2_skb_tx_csum(port, skb); | |
5689 | ||
5690 | if (frags == 1) { | |
5691 | /* First and Last descriptor */ | |
5692 | tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; | |
ac3dd277 TP |
5693 | mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); |
5694 | mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); | |
3f518509 MW |
5695 | } else { |
5696 | /* First but not Last */ | |
5697 | tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; | |
ac3dd277 TP |
5698 | mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); |
5699 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); | |
3f518509 MW |
5700 | |
5701 | /* Continue with other skb fragments */ | |
5702 | if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { | |
ac3dd277 | 5703 | tx_desc_unmap_put(port, txq, tx_desc); |
3f518509 MW |
5704 | frags = 0; |
5705 | goto out; | |
5706 | } | |
5707 | } | |
5708 | ||
5709 | txq_pcpu->reserved_num -= frags; | |
5710 | txq_pcpu->count += frags; | |
5711 | aggr_txq->count += frags; | |
5712 | ||
5713 | /* Enable transmit */ | |
5714 | wmb(); | |
5715 | mvpp2_aggr_txq_pend_desc_add(port, frags); | |
5716 | ||
5717 | if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) { | |
5718 | struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); | |
5719 | ||
5720 | netif_tx_stop_queue(nq); | |
5721 | } | |
5722 | out: | |
5723 | if (frags > 0) { | |
5724 | struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); | |
5725 | ||
5726 | u64_stats_update_begin(&stats->syncp); | |
5727 | stats->tx_packets++; | |
5728 | stats->tx_bytes += skb->len; | |
5729 | u64_stats_update_end(&stats->syncp); | |
5730 | } else { | |
5731 | dev->stats.tx_dropped++; | |
5732 | dev_kfree_skb_any(skb); | |
5733 | } | |
5734 | ||
edc660fa MW |
5735 | /* Finalize TX processing */ |
5736 | if (txq_pcpu->count >= txq->done_pkts_coal) | |
5737 | mvpp2_txq_done(port, txq, txq_pcpu); | |
5738 | ||
5739 | /* Set the timer in case not all frags were processed */ | |
5740 | if (txq_pcpu->count <= frags && txq_pcpu->count > 0) { | |
5741 | struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); | |
5742 | ||
5743 | mvpp2_timer_set(port_pcpu); | |
5744 | } | |
5745 | ||
3f518509 MW |
5746 | return NETDEV_TX_OK; |
5747 | } | |
5748 | ||
5749 | static inline void mvpp2_cause_error(struct net_device *dev, int cause) | |
5750 | { | |
5751 | if (cause & MVPP2_CAUSE_FCS_ERR_MASK) | |
5752 | netdev_err(dev, "FCS error\n"); | |
5753 | if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) | |
5754 | netdev_err(dev, "rx fifo overrun error\n"); | |
5755 | if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) | |
5756 | netdev_err(dev, "tx fifo underrun error\n"); | |
5757 | } | |
5758 | ||
edc660fa | 5759 | static int mvpp2_poll(struct napi_struct *napi, int budget) |
3f518509 | 5760 | { |
edc660fa MW |
5761 | u32 cause_rx_tx, cause_rx, cause_misc; |
5762 | int rx_done = 0; | |
5763 | struct mvpp2_port *port = netdev_priv(napi->dev); | |
a786841d | 5764 | int cpu = smp_processor_id(); |
3f518509 MW |
5765 | |
5766 | /* Rx/Tx cause register | |
5767 | * | |
5768 | * Bits 0-15: each bit indicates received packets on the Rx queue | |
5769 | * (bit 0 is for Rx queue 0). | |
5770 | * | |
5771 | * Bits 16-23: each bit indicates transmitted packets on the Tx queue | |
5772 | * (bit 16 is for Tx queue 0). | |
5773 | * | |
5774 | * Each CPU has its own Rx/Tx cause register | |
5775 | */ | |
a786841d TP |
5776 | cause_rx_tx = mvpp2_percpu_read(port->priv, cpu, |
5777 | MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); | |
edc660fa | 5778 | cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; |
3f518509 MW |
5779 | cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; |
5780 | ||
5781 | if (cause_misc) { | |
5782 | mvpp2_cause_error(port->dev, cause_misc); | |
5783 | ||
5784 | /* Clear the cause register */ | |
5785 | mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); | |
a786841d TP |
5786 | mvpp2_percpu_write(port->priv, cpu, |
5787 | MVPP2_ISR_RX_TX_CAUSE_REG(port->id), | |
5788 | cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); | |
3f518509 MW |
5789 | } |
5790 | ||
3f518509 MW |
5791 | cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; |
5792 | ||
5793 | /* Process RX packets */ | |
5794 | cause_rx |= port->pending_cause_rx; | |
5795 | while (cause_rx && budget > 0) { | |
5796 | int count; | |
5797 | struct mvpp2_rx_queue *rxq; | |
5798 | ||
5799 | rxq = mvpp2_get_rx_queue(port, cause_rx); | |
5800 | if (!rxq) | |
5801 | break; | |
5802 | ||
5803 | count = mvpp2_rx(port, budget, rxq); | |
5804 | rx_done += count; | |
5805 | budget -= count; | |
5806 | if (budget > 0) { | |
5807 | /* Clear the bit associated to this Rx queue | |
5808 | * so that next iteration will continue from | |
5809 | * the next Rx queue. | |
5810 | */ | |
5811 | cause_rx &= ~(1 << rxq->logic_rxq); | |
5812 | } | |
5813 | } | |
5814 | ||
5815 | if (budget > 0) { | |
5816 | cause_rx = 0; | |
6ad20165 | 5817 | napi_complete_done(napi, rx_done); |
3f518509 MW |
5818 | |
5819 | mvpp2_interrupts_enable(port); | |
5820 | } | |
5821 | port->pending_cause_rx = cause_rx; | |
5822 | return rx_done; | |
5823 | } | |
5824 | ||
5825 | /* Set hw internals when starting port */ | |
5826 | static void mvpp2_start_dev(struct mvpp2_port *port) | |
5827 | { | |
8e07269d PR |
5828 | struct net_device *ndev = port->dev; |
5829 | ||
3f518509 MW |
5830 | mvpp2_gmac_max_rx_size_set(port); |
5831 | mvpp2_txp_max_tx_size_set(port); | |
5832 | ||
5833 | napi_enable(&port->napi); | |
5834 | ||
5835 | /* Enable interrupts on all CPUs */ | |
5836 | mvpp2_interrupts_enable(port); | |
5837 | ||
5838 | mvpp2_port_enable(port); | |
8e07269d | 5839 | phy_start(ndev->phydev); |
3f518509 MW |
5840 | netif_tx_start_all_queues(port->dev); |
5841 | } | |
5842 | ||
5843 | /* Set hw internals when stopping port */ | |
5844 | static void mvpp2_stop_dev(struct mvpp2_port *port) | |
5845 | { | |
8e07269d PR |
5846 | struct net_device *ndev = port->dev; |
5847 | ||
3f518509 MW |
5848 | /* Stop new packets from arriving at the RXQs */ | |
5849 | mvpp2_ingress_disable(port); | |
5850 | ||
5851 | mdelay(10); | |
5852 | ||
5853 | /* Disable interrupts on all CPUs */ | |
5854 | mvpp2_interrupts_disable(port); | |
5855 | ||
5856 | napi_disable(&port->napi); | |
5857 | ||
5858 | netif_carrier_off(port->dev); | |
5859 | netif_tx_stop_all_queues(port->dev); | |
5860 | ||
5861 | mvpp2_egress_disable(port); | |
5862 | mvpp2_port_disable(port); | |
8e07269d | 5863 | phy_stop(ndev->phydev); |
3f518509 MW |
5864 | } |
5865 | ||
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD)
		new_rx_pending = MVPP2_MAX_RXD;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD)
		new_tx_pending = MVPP2_MAX_TXD;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, rounding to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, rounding to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
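
/*
 * Worked example, for illustration only: with MVPP2_MAX_RXD/MVPP2_MAX_TXD
 * large enough not to cap the request, rx_pending = 100 is rounded up to
 * ALIGN(100, 16) == 112 and tx_pending = 100 to ALIGN(100, 32) == 128,
 * since ALIGN(x, a) computes ((x) + (a) - 1) & ~((a) - 1) for a
 * power-of-two alignment a.
 */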

static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}

static int mvpp2_phy_connect(struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
				 port->phy_interface);
	if (!phy_dev) {
		netdev_err(port->dev, "cannot connect to phy\n");
		return -ENODEV;
	}
	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	port->link = 0;
	port->duplex = 0;
	port->speed = 0;

	return 0;
}

static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	phy_disconnect(ndev->phydev);
}

static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own address failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
	if (err) {
		netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
		goto err_cleanup_txqs;
	}

	/* The link is down by default */
	netif_carrier_off(port->dev);

	err = mvpp2_phy_connect(port);
	if (err < 0)
		goto err_free_irq;

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);

	mvpp2_start_dev(port);

	return 0;

err_free_irq:
	free_irq(port->irq, port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}

static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	int cpu;

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);

	free_irq(port->irq, port);
	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_cancel(&port_pcpu->tx_done_timer);
		port_pcpu->timer_scheduled = false;
		tasklet_kill(&port_pcpu->tx_done_tasklet);
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	return 0;
}

static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	struct netdev_hw_addr *ha;
	int id = port->id;
	bool allmulti = dev->flags & IFF_ALLMULTI;

	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

	/* Remove all of this port's mcast entries */
	mvpp2_prs_mcast_del_all(priv, id);

	if (allmulti && !netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(ha, dev)
			mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
	}
}

static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser to accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto error;
out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;

error:
	netdev_err(dev, "failed to change MAC address\n");
	return err;
}
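
/*
 * Usage sketch: this handler is reached through ndo_set_mac_address
 * (wired up in mvpp2_netdev_ops below), e.g. from userspace via
 * `ip link set dev <iface> address 00:11:22:33:44:55`. If the parser
 * update fails, the old address is restored so the parser filter and
 * dev->dev_addr never disagree.
 */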

static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, rounding to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

error:
	netdev_err(dev, "failed to change MTU\n");
	return err;
}
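
/*
 * Usage sketch: `ip link set dev <iface> mtu 9000` ends up here through
 * ndo_change_mtu. The BM pools are resized for the new packet size; the
 * port is only stopped and restarted when it is running, and a failed
 * resize falls back to the old MTU so the pools stay usable.
 */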

static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}

static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret;

	if (!dev->phydev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}

/* Ethtool methods */

/* Set interrupt coalescing for ethtool */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
	}

	return 0;
}

/* Get interrupt coalescing for ethtool */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	return 0;
}
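
/*
 * Usage sketch (userspace; the interface name is illustrative):
 *
 *	ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-frames 16
 *	ethtool -c eth0
 *
 * rx-usecs and rx-frames map to time_coal/pkts_coal on every Rx queue;
 * tx-frames maps to done_pkts_coal on every Tx queue. The getter above
 * reports queue 0 only, which is sufficient because the setter applies
 * one value to all queues.
 */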

static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD;
	ring->tx_max_pending = MVPP2_MAX_TXD;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}

static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters\n");
	return err;
}
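
/*
 * Usage sketch: `ethtool -G eth0 rx 1024 tx 2048` (name illustrative)
 * reaches mvpp2_ethtool_set_ringparam. On a running interface the queues
 * are torn down and rebuilt with the new sizes; if reallocation fails,
 * the previous sizes are retried so the port is not left without
 * descriptor rings.
 */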

/* Device ops */

static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queues for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Configure Rx queue group interrupt for this port */
	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    rxq_number);
	} else {
		u32 val;

		val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < txq_number; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}

/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct device_node *port_node,
			    struct mvpp2 *priv)
{
	struct device_node *phy_node;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct net_device *dev;
	struct resource *res;
	const char *dt_mac_addr;
	const char *mac_from;
	char hw_mac_addr[ETH_ALEN] = { 0 };	/* stays zeroed (invalid) unless read from hw */
	u32 id;
	int features;
	int phy_mode;
	int err, i, cpu;

	dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
				 rxq_number);
	if (!dev)
		return -ENOMEM;

	phy_node = of_parse_phandle(port_node, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "missing phy\n");
		err = -ENODEV;
		goto err_free_netdev;
	}

	phy_mode = of_get_phy_mode(port_node);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (of_property_read_u32(port_node, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);

	port->irq = irq_of_parse_and_map(port_node, 0);
	if (port->irq <= 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	if (of_property_read_bool(port_node, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->priv = priv;
	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * rxq_number;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->phy_node = phy_node;
	port->phy_interface = phy_mode;

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}
	} else {
		if (of_property_read_u32(port_node, "gop-port-id",
					 &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_free_irq;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
	}

	/* Alloc per-cpu stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	dt_mac_addr = of_get_mac_address(port_node);
	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
		mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
	} else {
		if (priv->hw_version == MVPP21)
			mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;
	port->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);

	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);

	mvpp2_port_reset(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_PINNED);
		port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
		port_pcpu->timer_scheduled = false;

		tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
			     (unsigned long)dev);
	}

	netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
	features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->vlan_features |= features;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s MAC address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[id] = port;
	return 0;

err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	irq_dispose_mapping(port->irq);
err_free_netdev:
	of_node_put(phy_node);
	free_netdev(dev);
	return err;
}
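
/*
 * Hypothetical device-tree port node matching what the probe above
 * parses (property names follow the marvell,pp2 binding; node name and
 * values are illustrative only):
 *
 *	eth0: eth0@c4000 {
 *		interrupts = <...>;
 *		port-id = <0>;
 *		phy = <&phy0>;
 *		phy-mode = "rgmii-id";
 *	};
 *
 * PPv2.2 ports additionally need the gop-port-id property read in the
 * MVPP22 branch above, and marvell,loopback may be set on either
 * variant.
 */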

/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	of_node_put(port->phy_node);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
	irq_dispose_mapping(port->irq);
	free_netdev(port->dev);
}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}

/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}

/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Check hardware constraints */
	if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Reset Rx queue group interrupt configuration */
	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		if (priv->hw_version == MVPP21) {
			mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i),
				    rxq_number);
		} else {
			u32 val;

			val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
			mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

			val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
			mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
		}
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}

static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int port_count, cpu;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->hw_version =
		(unsigned long)of_device_get_match_data(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	for_each_present_cpu(cpu) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->cpu_base[cpu] = base + cpu * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	if (priv->hw_version == MVPP22) {
		priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
		if (IS_ERR(priv->mg_clk)) {
			err = PTR_ERR(priv->mg_clk);
			goto err_gop_clk;
		}

		err = clk_prepare_enable(priv->mg_clk);
		if (err < 0)
			goto err_gop_clk;
	}

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
		if (err)
			goto err_mg_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_mg_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_mg_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_mg_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(struct mvpp2_port *),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_mg_clk;
	}

	/* Initialize ports */
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv);
		if (err < 0)
			goto err_mg_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}

static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");