// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2019 Texas Instruments
 */
c5013ac1 GS |
8 | #include <linux/bpf.h> |
9 | #include <linux/bpf_trace.h> | |
e6a84624 GS |
10 | #include <linux/if_ether.h> |
11 | #include <linux/if_vlan.h> | |
c5013ac1 | 12 | #include <linux/kmemleak.h> |
e6a84624 GS |
13 | #include <linux/module.h> |
14 | #include <linux/netdevice.h> | |
c5013ac1 | 15 | #include <linux/net_tstamp.h> |
ed3525ed | 16 | #include <linux/of.h> |
e6a84624 GS |
17 | #include <linux/phy.h> |
18 | #include <linux/platform_device.h> | |
c5013ac1 | 19 | #include <linux/pm_runtime.h> |
e6a84624 | 20 | #include <linux/skbuff.h> |
c5013ac1 GS |
21 | #include <net/page_pool.h> |
22 | #include <net/pkt_cls.h> | |
e6a84624 | 23 | |
c5013ac1 | 24 | #include "cpsw.h" |
e6a84624 GS |
25 | #include "cpts.h" |
26 | #include "cpsw_ale.h" | |
27 | #include "cpsw_priv.h" | |
cfc08345 | 28 | #include "cpsw_sl.h" |
e6a84624 GS |
29 | #include "davinci_cpdma.h" |
30 | ||
b78aba49 GS |
31 | #define CPTS_N_ETX_TS 4 |
32 | ||
51a95337 GS |
33 | int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv); |
34 | ||
c5013ac1 GS |
35 | void cpsw_intr_enable(struct cpsw_common *cpsw) |
36 | { | |
37 | writel_relaxed(0xFF, &cpsw->wr_regs->tx_en); | |
38 | writel_relaxed(0xFF, &cpsw->wr_regs->rx_en); | |
39 | ||
40 | cpdma_ctlr_int_ctrl(cpsw->dma, true); | |
41 | } | |
42 | ||
43 | void cpsw_intr_disable(struct cpsw_common *cpsw) | |
44 | { | |
45 | writel_relaxed(0, &cpsw->wr_regs->tx_en); | |
46 | writel_relaxed(0, &cpsw->wr_regs->rx_en); | |
47 | ||
48 | cpdma_ctlr_int_ctrl(cpsw->dma, false); | |
49 | } | |
50 | ||
51 | void cpsw_tx_handler(void *token, int len, int status) | |
52 | { | |
53 | struct cpsw_meta_xdp *xmeta; | |
54 | struct xdp_frame *xdpf; | |
55 | struct net_device *ndev; | |
56 | struct netdev_queue *txq; | |
57 | struct sk_buff *skb; | |
58 | int ch; | |
59 | ||
60 | if (cpsw_is_xdpf_handle(token)) { | |
61 | xdpf = cpsw_handle_to_xdpf(token); | |
62 | xmeta = (void *)xdpf + CPSW_XMETA_OFFSET; | |
63 | ndev = xmeta->ndev; | |
64 | ch = xmeta->ch; | |
65 | xdp_return_frame(xdpf); | |
66 | } else { | |
67 | skb = token; | |
68 | ndev = skb->dev; | |
69 | ch = skb_get_queue_mapping(skb); | |
70 | cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb); | |
71 | dev_kfree_skb_any(skb); | |
72 | } | |
73 | ||
74 | /* Check whether the queue is stopped due to stalled tx dma, if the | |
75 | * queue is stopped then start the queue as we have free desc for tx | |
76 | */ | |
77 | txq = netdev_get_tx_queue(ndev, ch); | |
78 | if (unlikely(netif_tx_queue_stopped(txq))) | |
79 | netif_tx_wake_queue(txq); | |
80 | ||
81 | ndev->stats.tx_packets++; | |
82 | ndev->stats.tx_bytes += len; | |
83 | } | |
84 | ||
85 | irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id) | |
86 | { | |
87 | struct cpsw_common *cpsw = dev_id; | |
88 | ||
89 | writel(0, &cpsw->wr_regs->tx_en); | |
90 | cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX); | |
91 | ||
92 | if (cpsw->quirk_irq) { | |
93 | disable_irq_nosync(cpsw->irqs_table[1]); | |
94 | cpsw->tx_irq_disabled = true; | |
95 | } | |
96 | ||
97 | napi_schedule(&cpsw->napi_tx); | |
98 | return IRQ_HANDLED; | |
99 | } | |
100 | ||
101 | irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) | |
102 | { | |
103 | struct cpsw_common *cpsw = dev_id; | |
104 | ||
c5013ac1 | 105 | writel(0, &cpsw->wr_regs->rx_en); |
51302f77 | 106 | cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); |
c5013ac1 GS |
107 | |
108 | if (cpsw->quirk_irq) { | |
109 | disable_irq_nosync(cpsw->irqs_table[0]); | |
110 | cpsw->rx_irq_disabled = true; | |
111 | } | |
112 | ||
113 | napi_schedule(&cpsw->napi_rx); | |
114 | return IRQ_HANDLED; | |
115 | } | |
116 | ||
84ea9c0a GS |
117 | irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id) |
118 | { | |
119 | struct cpsw_common *cpsw = dev_id; | |
120 | ||
121 | writel(0, &cpsw->wr_regs->misc_en); | |
122 | cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC); | |
123 | cpts_misc_interrupt(cpsw->cpts); | |
124 | writel(0x10, &cpsw->wr_regs->misc_en); | |
125 | ||
126 | return IRQ_HANDLED; | |
127 | } | |
128 | ||
c5013ac1 GS |
129 | int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget) |
130 | { | |
131 | struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); | |
132 | int num_tx, cur_budget, ch; | |
133 | u32 ch_map; | |
134 | struct cpsw_vector *txv; | |
135 | ||
136 | /* process every unprocessed channel */ | |
137 | ch_map = cpdma_ctrl_txchs_state(cpsw->dma); | |
138 | for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) { | |
139 | if (!(ch_map & 0x80)) | |
140 | continue; | |
141 | ||
142 | txv = &cpsw->txv[ch]; | |
143 | if (unlikely(txv->budget > budget - num_tx)) | |
144 | cur_budget = budget - num_tx; | |
145 | else | |
146 | cur_budget = txv->budget; | |
147 | ||
148 | num_tx += cpdma_chan_process(txv->ch, cur_budget); | |
149 | if (num_tx >= budget) | |
150 | break; | |
151 | } | |
152 | ||
153 | if (num_tx < budget) { | |
154 | napi_complete(napi_tx); | |
155 | writel(0xff, &cpsw->wr_regs->tx_en); | |
156 | } | |
157 | ||
158 | return num_tx; | |
159 | } | |
160 | ||
161 | int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) | |
162 | { | |
163 | struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); | |
164 | int num_tx; | |
165 | ||
166 | num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget); | |
167 | if (num_tx < budget) { | |
168 | napi_complete(napi_tx); | |
169 | writel(0xff, &cpsw->wr_regs->tx_en); | |
170 | if (cpsw->tx_irq_disabled) { | |
171 | cpsw->tx_irq_disabled = false; | |
172 | enable_irq(cpsw->irqs_table[1]); | |
173 | } | |
174 | } | |
175 | ||
176 | return num_tx; | |
177 | } | |
178 | ||
179 | int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget) | |
180 | { | |
181 | struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); | |
182 | int num_rx, cur_budget, ch; | |
183 | u32 ch_map; | |
184 | struct cpsw_vector *rxv; | |
185 | ||
186 | /* process every unprocessed channel */ | |
187 | ch_map = cpdma_ctrl_rxchs_state(cpsw->dma); | |
188 | for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) { | |
189 | if (!(ch_map & 0x01)) | |
190 | continue; | |
191 | ||
192 | rxv = &cpsw->rxv[ch]; | |
193 | if (unlikely(rxv->budget > budget - num_rx)) | |
194 | cur_budget = budget - num_rx; | |
195 | else | |
196 | cur_budget = rxv->budget; | |
197 | ||
198 | num_rx += cpdma_chan_process(rxv->ch, cur_budget); | |
199 | if (num_rx >= budget) | |
200 | break; | |
201 | } | |
202 | ||
203 | if (num_rx < budget) { | |
204 | napi_complete_done(napi_rx, num_rx); | |
205 | writel(0xff, &cpsw->wr_regs->rx_en); | |
206 | } | |
207 | ||
208 | return num_rx; | |
209 | } | |
210 | ||
211 | int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) | |
212 | { | |
213 | struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); | |
214 | int num_rx; | |
215 | ||
216 | num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget); | |
217 | if (num_rx < budget) { | |
218 | napi_complete_done(napi_rx, num_rx); | |
219 | writel(0xff, &cpsw->wr_regs->rx_en); | |
220 | if (cpsw->rx_irq_disabled) { | |
221 | cpsw->rx_irq_disabled = false; | |
222 | enable_irq(cpsw->irqs_table[0]); | |
223 | } | |
224 | } | |
225 | ||
226 | return num_rx; | |
227 | } | |
228 | ||
229 | void cpsw_rx_vlan_encap(struct sk_buff *skb) | |
230 | { | |
231 | struct cpsw_priv *priv = netdev_priv(skb->dev); | |
232 | u32 rx_vlan_encap_hdr = *((u32 *)skb->data); | |
233 | struct cpsw_common *cpsw = priv->cpsw; | |
234 | u16 vtag, vid, prio, pkt_type; | |
235 | ||
236 | /* Remove VLAN header encapsulation word */ | |
237 | skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE); | |
238 | ||
239 | pkt_type = (rx_vlan_encap_hdr >> | |
240 | CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) & | |
241 | CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK; | |
242 | /* Ignore unknown & Priority-tagged packets*/ | |
243 | if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV || | |
244 | pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG) | |
245 | return; | |
246 | ||
247 | vid = (rx_vlan_encap_hdr >> | |
248 | CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) & | |
249 | VLAN_VID_MASK; | |
250 | /* Ignore vid 0 and pass packet as is */ | |
251 | if (!vid) | |
252 | return; | |
253 | ||
254 | /* Untag P0 packets if set for vlan */ | |
255 | if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) { | |
256 | prio = (rx_vlan_encap_hdr >> | |
257 | CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) & | |
258 | CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK; | |
259 | ||
260 | vtag = (prio << VLAN_PRIO_SHIFT) | vid; | |
261 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); | |
262 | } | |
263 | ||
264 | /* strip vlan tag for VLAN-tagged packet */ | |
265 | if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) { | |
266 | memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); | |
267 | skb_pull(skb, VLAN_HLEN); | |
268 | } | |
269 | } | |
270 | ||
271 | void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv) | |
272 | { | |
273 | slave_write(slave, mac_hi(priv->mac_addr), SA_HI); | |
274 | slave_write(slave, mac_lo(priv->mac_addr), SA_LO); | |
275 | } | |
276 | ||
277 | void soft_reset(const char *module, void __iomem *reg) | |
278 | { | |
279 | unsigned long timeout = jiffies + HZ; | |
280 | ||
281 | writel_relaxed(1, reg); | |
282 | do { | |
283 | cpu_relax(); | |
284 | } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies)); | |
285 | ||
286 | WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module); | |
287 | } | |
288 | ||
0290bd29 | 289 | void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue) |
c5013ac1 GS |
290 | { |
291 | struct cpsw_priv *priv = netdev_priv(ndev); | |
292 | struct cpsw_common *cpsw = priv->cpsw; | |
293 | int ch; | |
294 | ||
295 | cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); | |
296 | ndev->stats.tx_errors++; | |
297 | cpsw_intr_disable(cpsw); | |
298 | for (ch = 0; ch < cpsw->tx_ch_num; ch++) { | |
299 | cpdma_chan_stop(cpsw->txv[ch].ch); | |
300 | cpdma_chan_start(cpsw->txv[ch].ch); | |
301 | } | |
302 | ||
303 | cpsw_intr_enable(cpsw); | |
304 | netif_trans_update(ndev); | |
305 | netif_tx_wake_all_queues(ndev); | |
306 | } | |
307 | ||
308 | static int cpsw_get_common_speed(struct cpsw_common *cpsw) | |
309 | { | |
310 | int i, speed; | |
311 | ||
312 | for (i = 0, speed = 0; i < cpsw->data.slaves; i++) | |
313 | if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link) | |
314 | speed += cpsw->slaves[i].phy->speed; | |
315 | ||
316 | return speed; | |
317 | } | |
318 | ||
319 | int cpsw_need_resplit(struct cpsw_common *cpsw) | |
320 | { | |
321 | int i, rlim_ch_num; | |
322 | int speed, ch_rate; | |
323 | ||
324 | /* re-split resources only in case speed was changed */ | |
325 | speed = cpsw_get_common_speed(cpsw); | |
326 | if (speed == cpsw->speed || !speed) | |
327 | return 0; | |
328 | ||
329 | cpsw->speed = speed; | |
330 | ||
331 | for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) { | |
332 | ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch); | |
333 | if (!ch_rate) | |
334 | break; | |
335 | ||
336 | rlim_ch_num++; | |
337 | } | |
338 | ||
339 | /* cases not dependent on speed */ | |
340 | if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num) | |
341 | return 0; | |
342 | ||
343 | return 1; | |
344 | } | |
345 | ||
346 | void cpsw_split_res(struct cpsw_common *cpsw) | |
347 | { | |
348 | u32 consumed_rate = 0, bigest_rate = 0; | |
349 | struct cpsw_vector *txv = cpsw->txv; | |
350 | int i, ch_weight, rlim_ch_num = 0; | |
351 | int budget, bigest_rate_ch = 0; | |
352 | u32 ch_rate, max_rate; | |
353 | int ch_budget = 0; | |
354 | ||
355 | for (i = 0; i < cpsw->tx_ch_num; i++) { | |
356 | ch_rate = cpdma_chan_get_rate(txv[i].ch); | |
357 | if (!ch_rate) | |
358 | continue; | |
359 | ||
360 | rlim_ch_num++; | |
361 | consumed_rate += ch_rate; | |
362 | } | |
363 | ||
364 | if (cpsw->tx_ch_num == rlim_ch_num) { | |
365 | max_rate = consumed_rate; | |
366 | } else if (!rlim_ch_num) { | |
367 | ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num; | |
368 | bigest_rate = 0; | |
369 | max_rate = consumed_rate; | |
370 | } else { | |
371 | max_rate = cpsw->speed * 1000; | |
372 | ||
373 | /* if max_rate is less then expected due to reduced link speed, | |
374 | * split proportionally according next potential max speed | |
375 | */ | |
376 | if (max_rate < consumed_rate) | |
377 | max_rate *= 10; | |
378 | ||
379 | if (max_rate < consumed_rate) | |
380 | max_rate *= 10; | |
381 | ||
382 | ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate; | |
383 | ch_budget = (CPSW_POLL_WEIGHT - ch_budget) / | |
384 | (cpsw->tx_ch_num - rlim_ch_num); | |
385 | bigest_rate = (max_rate - consumed_rate) / | |
386 | (cpsw->tx_ch_num - rlim_ch_num); | |
387 | } | |
388 | ||
389 | /* split tx weight/budget */ | |
390 | budget = CPSW_POLL_WEIGHT; | |
391 | for (i = 0; i < cpsw->tx_ch_num; i++) { | |
392 | ch_rate = cpdma_chan_get_rate(txv[i].ch); | |
393 | if (ch_rate) { | |
394 | txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate; | |
395 | if (!txv[i].budget) | |
396 | txv[i].budget++; | |
397 | if (ch_rate > bigest_rate) { | |
398 | bigest_rate_ch = i; | |
399 | bigest_rate = ch_rate; | |
400 | } | |
401 | ||
402 | ch_weight = (ch_rate * 100) / max_rate; | |
403 | if (!ch_weight) | |
404 | ch_weight++; | |
405 | cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight); | |
406 | } else { | |
407 | txv[i].budget = ch_budget; | |
408 | if (!bigest_rate_ch) | |
409 | bigest_rate_ch = i; | |
410 | cpdma_chan_set_weight(cpsw->txv[i].ch, 0); | |
411 | } | |
412 | ||
413 | budget -= txv[i].budget; | |
414 | } | |
415 | ||
416 | if (budget) | |
417 | txv[bigest_rate_ch].budget += budget; | |
418 | ||
419 | /* split rx budget */ | |
420 | budget = CPSW_POLL_WEIGHT; | |
421 | ch_budget = budget / cpsw->rx_ch_num; | |
422 | for (i = 0; i < cpsw->rx_ch_num; i++) { | |
423 | cpsw->rxv[i].budget = ch_budget; | |
424 | budget -= ch_budget; | |
425 | } | |
426 | ||
427 | if (budget) | |
428 | cpsw->rxv[0].budget += budget; | |
429 | } | |
430 | ||
e6a84624 GS |
431 | int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, |
432 | int ale_ageout, phys_addr_t desc_mem_phys, | |
433 | int descs_pool_size) | |
434 | { | |
435 | u32 slave_offset, sliver_offset, slave_size; | |
436 | struct cpsw_ale_params ale_params; | |
437 | struct cpsw_platform_data *data; | |
438 | struct cpdma_params dma_params; | |
439 | struct device *dev = cpsw->dev; | |
ed3525ed | 440 | struct device_node *cpts_node; |
e6a84624 GS |
441 | void __iomem *cpts_regs; |
442 | int ret = 0, i; | |
443 | ||
444 | data = &cpsw->data; | |
445 | cpsw->rx_ch_num = 1; | |
446 | cpsw->tx_ch_num = 1; | |
447 | ||
448 | cpsw->version = readl(&cpsw->regs->id_ver); | |
449 | ||
450 | memset(&dma_params, 0, sizeof(dma_params)); | |
451 | memset(&ale_params, 0, sizeof(ale_params)); | |
452 | ||
453 | switch (cpsw->version) { | |
454 | case CPSW_VERSION_1: | |
455 | cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; | |
456 | cpts_regs = ss_regs + CPSW1_CPTS_OFFSET; | |
457 | cpsw->hw_stats = ss_regs + CPSW1_HW_STATS; | |
458 | dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; | |
459 | dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; | |
460 | ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; | |
461 | slave_offset = CPSW1_SLAVE_OFFSET; | |
462 | slave_size = CPSW1_SLAVE_SIZE; | |
463 | sliver_offset = CPSW1_SLIVER_OFFSET; | |
464 | dma_params.desc_mem_phys = 0; | |
465 | break; | |
466 | case CPSW_VERSION_2: | |
467 | case CPSW_VERSION_3: | |
468 | case CPSW_VERSION_4: | |
469 | cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; | |
470 | cpts_regs = ss_regs + CPSW2_CPTS_OFFSET; | |
471 | cpsw->hw_stats = ss_regs + CPSW2_HW_STATS; | |
472 | dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; | |
473 | dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; | |
474 | ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; | |
475 | slave_offset = CPSW2_SLAVE_OFFSET; | |
476 | slave_size = CPSW2_SLAVE_SIZE; | |
477 | sliver_offset = CPSW2_SLIVER_OFFSET; | |
478 | dma_params.desc_mem_phys = desc_mem_phys; | |
479 | break; | |
480 | default: | |
481 | dev_err(dev, "unknown version 0x%08x\n", cpsw->version); | |
482 | return -ENODEV; | |
483 | } | |
484 | ||
485 | for (i = 0; i < cpsw->data.slaves; i++) { | |
486 | struct cpsw_slave *slave = &cpsw->slaves[i]; | |
487 | void __iomem *regs = cpsw->regs; | |
488 | ||
489 | slave->slave_num = i; | |
490 | slave->data = &cpsw->data.slave_data[i]; | |
491 | slave->regs = regs + slave_offset; | |
e6a84624 | 492 | slave->port_vlan = slave->data->dual_emac_res_vlan; |
cfc08345 GS |
493 | slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset); |
494 | if (IS_ERR(slave->mac_sl)) | |
495 | return PTR_ERR(slave->mac_sl); | |
e6a84624 GS |
496 | |
497 | slave_offset += slave_size; | |
498 | sliver_offset += SLIVER_SIZE; | |
499 | } | |
500 | ||
501 | ale_params.dev = dev; | |
502 | ale_params.ale_ageout = ale_ageout; | |
503 | ale_params.ale_entries = data->ale_entries; | |
504 | ale_params.ale_ports = CPSW_ALE_PORTS_NUM; | |
505 | ||
506 | cpsw->ale = cpsw_ale_create(&ale_params); | |
3469660d | 507 | if (IS_ERR(cpsw->ale)) { |
e6a84624 | 508 | dev_err(dev, "error initializing ale engine\n"); |
3469660d | 509 | return PTR_ERR(cpsw->ale); |
e6a84624 GS |
510 | } |
511 | ||
512 | dma_params.dev = dev; | |
513 | dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH; | |
514 | dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE; | |
515 | dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP; | |
516 | dma_params.txcp = dma_params.txhdp + CPDMA_TXCP; | |
517 | dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP; | |
518 | ||
519 | dma_params.num_chan = data->channels; | |
520 | dma_params.has_soft_reset = true; | |
521 | dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE; | |
522 | dma_params.desc_mem_size = data->bd_ram_size; | |
523 | dma_params.desc_align = 16; | |
524 | dma_params.has_ext_regs = true; | |
525 | dma_params.desc_hw_addr = dma_params.desc_mem_phys; | |
526 | dma_params.bus_freq_mhz = cpsw->bus_freq_mhz; | |
527 | dma_params.descs_pool_size = descs_pool_size; | |
528 | ||
529 | cpsw->dma = cpdma_ctlr_create(&dma_params); | |
530 | if (!cpsw->dma) { | |
531 | dev_err(dev, "error initializing dma\n"); | |
532 | return -ENOMEM; | |
533 | } | |
534 | ||
ed3525ed IA |
535 | cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts"); |
536 | if (!cpts_node) | |
537 | cpts_node = cpsw->dev->of_node; | |
538 | ||
b78aba49 GS |
539 | cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node, |
540 | CPTS_N_ETX_TS); | |
e6a84624 GS |
541 | if (IS_ERR(cpsw->cpts)) { |
542 | ret = PTR_ERR(cpsw->cpts); | |
543 | cpdma_ctlr_destroy(cpsw->dma); | |
544 | } | |
ed3525ed | 545 | of_node_put(cpts_node); |
e6a84624 GS |
546 | |
547 | return ret; | |
548 | } | |
c5013ac1 GS |
549 | |
550 | #if IS_ENABLED(CONFIG_TI_CPTS) | |
551 | ||
552 | static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) | |
553 | { | |
554 | struct cpsw_common *cpsw = priv->cpsw; | |
555 | struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; | |
556 | u32 ts_en, seq_id; | |
557 | ||
558 | if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) { | |
559 | slave_write(slave, 0, CPSW1_TS_CTL); | |
560 | return; | |
561 | } | |
562 | ||
563 | seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; | |
564 | ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; | |
565 | ||
566 | if (priv->tx_ts_enabled) | |
567 | ts_en |= CPSW_V1_TS_TX_EN; | |
568 | ||
569 | if (priv->rx_ts_enabled) | |
570 | ts_en |= CPSW_V1_TS_RX_EN; | |
571 | ||
572 | slave_write(slave, ts_en, CPSW1_TS_CTL); | |
573 | slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE); | |
574 | } | |
575 | ||
576 | static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) | |
577 | { | |
578 | struct cpsw_common *cpsw = priv->cpsw; | |
579 | struct cpsw_slave *slave; | |
580 | u32 ctrl, mtype; | |
581 | ||
582 | slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; | |
583 | ||
584 | ctrl = slave_read(slave, CPSW2_CONTROL); | |
585 | switch (cpsw->version) { | |
586 | case CPSW_VERSION_2: | |
587 | ctrl &= ~CTRL_V2_ALL_TS_MASK; | |
588 | ||
589 | if (priv->tx_ts_enabled) | |
590 | ctrl |= CTRL_V2_TX_TS_BITS; | |
591 | ||
592 | if (priv->rx_ts_enabled) | |
593 | ctrl |= CTRL_V2_RX_TS_BITS; | |
594 | break; | |
595 | case CPSW_VERSION_3: | |
596 | default: | |
597 | ctrl &= ~CTRL_V3_ALL_TS_MASK; | |
598 | ||
599 | if (priv->tx_ts_enabled) | |
600 | ctrl |= CTRL_V3_TX_TS_BITS; | |
601 | ||
602 | if (priv->rx_ts_enabled) | |
603 | ctrl |= CTRL_V3_RX_TS_BITS; | |
604 | break; | |
605 | } | |
606 | ||
607 | mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS; | |
608 | ||
609 | slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE); | |
610 | slave_write(slave, ctrl, CPSW2_CONTROL); | |
611 | writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype); | |
612 | writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype); | |
613 | } | |
614 | ||
615 | static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) | |
616 | { | |
617 | struct cpsw_priv *priv = netdev_priv(dev); | |
618 | struct cpsw_common *cpsw = priv->cpsw; | |
619 | struct hwtstamp_config cfg; | |
620 | ||
621 | if (cpsw->version != CPSW_VERSION_1 && | |
622 | cpsw->version != CPSW_VERSION_2 && | |
623 | cpsw->version != CPSW_VERSION_3) | |
624 | return -EOPNOTSUPP; | |
625 | ||
626 | if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) | |
627 | return -EFAULT; | |
628 | ||
629 | /* reserved for future extensions */ | |
630 | if (cfg.flags) | |
631 | return -EINVAL; | |
632 | ||
633 | if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) | |
634 | return -ERANGE; | |
635 | ||
636 | switch (cfg.rx_filter) { | |
637 | case HWTSTAMP_FILTER_NONE: | |
638 | priv->rx_ts_enabled = 0; | |
639 | break; | |
640 | case HWTSTAMP_FILTER_ALL: | |
641 | case HWTSTAMP_FILTER_NTP_ALL: | |
642 | return -ERANGE; | |
643 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: | |
644 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: | |
645 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: | |
646 | priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; | |
647 | cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; | |
648 | break; | |
649 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: | |
650 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: | |
651 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: | |
652 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: | |
653 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: | |
654 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: | |
655 | case HWTSTAMP_FILTER_PTP_V2_EVENT: | |
656 | case HWTSTAMP_FILTER_PTP_V2_SYNC: | |
657 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: | |
658 | priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT; | |
659 | cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; | |
660 | break; | |
661 | default: | |
662 | return -ERANGE; | |
663 | } | |
664 | ||
665 | priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON; | |
666 | ||
667 | switch (cpsw->version) { | |
668 | case CPSW_VERSION_1: | |
669 | cpsw_hwtstamp_v1(priv); | |
670 | break; | |
671 | case CPSW_VERSION_2: | |
672 | case CPSW_VERSION_3: | |
673 | cpsw_hwtstamp_v2(priv); | |
674 | break; | |
675 | default: | |
676 | WARN_ON(1); | |
677 | } | |
678 | ||
679 | return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; | |
680 | } | |
681 | ||
682 | static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) | |
683 | { | |
684 | struct cpsw_common *cpsw = ndev_to_cpsw(dev); | |
685 | struct cpsw_priv *priv = netdev_priv(dev); | |
686 | struct hwtstamp_config cfg; | |
687 | ||
688 | if (cpsw->version != CPSW_VERSION_1 && | |
689 | cpsw->version != CPSW_VERSION_2 && | |
690 | cpsw->version != CPSW_VERSION_3) | |
691 | return -EOPNOTSUPP; | |
692 | ||
693 | cfg.flags = 0; | |
694 | cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; | |
695 | cfg.rx_filter = priv->rx_ts_enabled; | |
696 | ||
697 | return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; | |
698 | } | |
699 | #else | |
700 | static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) | |
701 | { | |
702 | return -EOPNOTSUPP; | |
703 | } | |
704 | ||
705 | static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) | |
706 | { | |
707 | return -EOPNOTSUPP; | |
708 | } | |
709 | #endif /*CONFIG_TI_CPTS*/ | |
710 | ||
711 | int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) | |
712 | { | |
713 | struct cpsw_priv *priv = netdev_priv(dev); | |
714 | struct cpsw_common *cpsw = priv->cpsw; | |
715 | int slave_no = cpsw_slave_index(cpsw, priv); | |
716 | ||
717 | if (!netif_running(dev)) | |
718 | return -EINVAL; | |
719 | ||
720 | switch (cmd) { | |
721 | case SIOCSHWTSTAMP: | |
722 | return cpsw_hwtstamp_set(dev, req); | |
723 | case SIOCGHWTSTAMP: | |
724 | return cpsw_hwtstamp_get(dev, req); | |
725 | } | |
726 | ||
727 | if (!cpsw->slaves[slave_no].phy) | |
728 | return -EOPNOTSUPP; | |
729 | return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd); | |
730 | } | |
731 | ||
732 | int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate) | |
733 | { | |
734 | struct cpsw_priv *priv = netdev_priv(ndev); | |
735 | struct cpsw_common *cpsw = priv->cpsw; | |
736 | struct cpsw_slave *slave; | |
737 | u32 min_rate; | |
738 | u32 ch_rate; | |
739 | int i, ret; | |
740 | ||
741 | ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate; | |
742 | if (ch_rate == rate) | |
743 | return 0; | |
744 | ||
745 | ch_rate = rate * 1000; | |
746 | min_rate = cpdma_chan_get_min_rate(cpsw->dma); | |
747 | if ((ch_rate < min_rate && ch_rate)) { | |
748 | dev_err(priv->dev, "The channel rate cannot be less than %dMbps", | |
749 | min_rate); | |
750 | return -EINVAL; | |
751 | } | |
752 | ||
753 | if (rate > cpsw->speed) { | |
754 | dev_err(priv->dev, "The channel rate cannot be more than 2Gbps"); | |
755 | return -EINVAL; | |
756 | } | |
757 | ||
758 | ret = pm_runtime_get_sync(cpsw->dev); | |
759 | if (ret < 0) { | |
760 | pm_runtime_put_noidle(cpsw->dev); | |
761 | return ret; | |
762 | } | |
763 | ||
764 | ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate); | |
765 | pm_runtime_put(cpsw->dev); | |
766 | ||
767 | if (ret) | |
768 | return ret; | |
769 | ||
770 | /* update rates for slaves tx queues */ | |
771 | for (i = 0; i < cpsw->data.slaves; i++) { | |
772 | slave = &cpsw->slaves[i]; | |
773 | if (!slave->ndev) | |
774 | continue; | |
775 | ||
776 | netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate; | |
777 | } | |
778 | ||
779 | cpsw_split_res(cpsw); | |
780 | return ret; | |
781 | } | |
782 | ||
783 | static int cpsw_tc_to_fifo(int tc, int num_tc) | |
784 | { | |
785 | if (tc == num_tc - 1) | |
786 | return 0; | |
787 | ||
788 | return CPSW_FIFO_SHAPERS_NUM - tc; | |
789 | } | |
790 | ||
791 | bool cpsw_shp_is_off(struct cpsw_priv *priv) | |
792 | { | |
793 | struct cpsw_common *cpsw = priv->cpsw; | |
794 | struct cpsw_slave *slave; | |
795 | u32 shift, mask, val; | |
796 | ||
797 | val = readl_relaxed(&cpsw->regs->ptype); | |
798 | ||
799 | slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; | |
800 | shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; | |
801 | mask = 7 << shift; | |
802 | val = val & mask; | |
803 | ||
804 | return !val; | |
805 | } | |
806 | ||
807 | static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on) | |
808 | { | |
809 | struct cpsw_common *cpsw = priv->cpsw; | |
810 | struct cpsw_slave *slave; | |
811 | u32 shift, mask, val; | |
812 | ||
813 | val = readl_relaxed(&cpsw->regs->ptype); | |
814 | ||
815 | slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; | |
816 | shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; | |
817 | mask = (1 << --fifo) << shift; | |
818 | val = on ? val | mask : val & ~mask; | |
819 | ||
820 | writel_relaxed(val, &cpsw->regs->ptype); | |
821 | } | |
822 | ||
823 | static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw) | |
824 | { | |
825 | struct cpsw_common *cpsw = priv->cpsw; | |
826 | u32 val = 0, send_pct, shift; | |
827 | struct cpsw_slave *slave; | |
828 | int pct = 0, i; | |
829 | ||
830 | if (bw > priv->shp_cfg_speed * 1000) | |
831 | goto err; | |
832 | ||
833 | /* shaping has to stay enabled for highest fifos linearly | |
834 | * and fifo bw no more then interface can allow | |
835 | */ | |
836 | slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; | |
837 | send_pct = slave_read(slave, SEND_PERCENT); | |
838 | for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) { | |
839 | if (!bw) { | |
840 | if (i >= fifo || !priv->fifo_bw[i]) | |
841 | continue; | |
842 | ||
843 | dev_warn(priv->dev, "Prev FIFO%d is shaped", i); | |
844 | continue; | |
845 | } | |
846 | ||
847 | if (!priv->fifo_bw[i] && i > fifo) { | |
848 | dev_err(priv->dev, "Upper FIFO%d is not shaped", i); | |
849 | return -EINVAL; | |
850 | } | |
851 | ||
852 | shift = (i - 1) * 8; | |
853 | if (i == fifo) { | |
854 | send_pct &= ~(CPSW_PCT_MASK << shift); | |
855 | val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10); | |
856 | if (!val) | |
857 | val = 1; | |
858 | ||
859 | send_pct |= val << shift; | |
860 | pct += val; | |
861 | continue; | |
862 | } | |
863 | ||
864 | if (priv->fifo_bw[i]) | |
865 | pct += (send_pct >> shift) & CPSW_PCT_MASK; | |
866 | } | |
867 | ||
868 | if (pct >= 100) | |
869 | goto err; | |
870 | ||
871 | slave_write(slave, send_pct, SEND_PERCENT); | |
872 | priv->fifo_bw[fifo] = bw; | |
873 | ||
874 | dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo, | |
875 | DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100)); | |
876 | ||
877 | return 0; | |
878 | err: | |
879 | dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration"); | |
880 | return -EINVAL; | |
881 | } | |
882 | ||
883 | static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw) | |
884 | { | |
885 | struct cpsw_common *cpsw = priv->cpsw; | |
886 | struct cpsw_slave *slave; | |
887 | u32 tx_in_ctl_rg, val; | |
888 | int ret; | |
889 | ||
890 | ret = cpsw_set_fifo_bw(priv, fifo, bw); | |
891 | if (ret) | |
892 | return ret; | |
893 | ||
894 | slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; | |
895 | tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ? | |
896 | CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL; | |
897 | ||
898 | if (!bw) | |
899 | cpsw_fifo_shp_on(priv, fifo, bw); | |
900 | ||
901 | val = slave_read(slave, tx_in_ctl_rg); | |
902 | if (cpsw_shp_is_off(priv)) { | |
903 | /* disable FIFOs rate limited queues */ | |
904 | val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT); | |
905 | ||
906 | /* set type of FIFO queues to normal priority mode */ | |
907 | val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT); | |
908 | ||
909 | /* set type of FIFO queues to be rate limited */ | |
910 | if (bw) | |
911 | val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT; | |
912 | else | |
913 | priv->shp_cfg_speed = 0; | |
914 | } | |
915 | ||
916 | /* toggle a FIFO rate limited queue */ | |
917 | if (bw) | |
918 | val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); | |
919 | else | |
920 | val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); | |
921 | slave_write(slave, val, tx_in_ctl_rg); | |
922 | ||
923 | /* FIFO transmit shape enable */ | |
924 | cpsw_fifo_shp_on(priv, fifo, bw); | |
925 | return 0; | |
926 | } | |
927 | ||
928 | /* Defaults: | |
929 | * class A - prio 3 | |
930 | * class B - prio 2 | |
931 | * shaping for class A should be set first | |
932 | */ | |
933 | static int cpsw_set_cbs(struct net_device *ndev, | |
934 | struct tc_cbs_qopt_offload *qopt) | |
935 | { | |
936 | struct cpsw_priv *priv = netdev_priv(ndev); | |
937 | struct cpsw_common *cpsw = priv->cpsw; | |
938 | struct cpsw_slave *slave; | |
939 | int prev_speed = 0; | |
940 | int tc, ret, fifo; | |
941 | u32 bw = 0; | |
942 | ||
943 | tc = netdev_txq_to_tc(priv->ndev, qopt->queue); | |
944 | ||
945 | /* enable channels in backward order, as highest FIFOs must be rate | |
946 | * limited first and for compliance with CPDMA rate limited channels | |
947 | * that also used in bacward order. FIFO0 cannot be rate limited. | |
948 | */ | |
949 | fifo = cpsw_tc_to_fifo(tc, ndev->num_tc); | |
950 | if (!fifo) { | |
951 | dev_err(priv->dev, "Last tc%d can't be rate limited", tc); | |
952 | return -EINVAL; | |
953 | } | |
954 | ||
955 | /* do nothing, it's disabled anyway */ | |
956 | if (!qopt->enable && !priv->fifo_bw[fifo]) | |
957 | return 0; | |
958 | ||
959 | /* shapers can be set if link speed is known */ | |
960 | slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; | |
961 | if (slave->phy && slave->phy->link) { | |
962 | if (priv->shp_cfg_speed && | |
963 | priv->shp_cfg_speed != slave->phy->speed) | |
964 | prev_speed = priv->shp_cfg_speed; | |
965 | ||
966 | priv->shp_cfg_speed = slave->phy->speed; | |
967 | } | |
968 | ||
969 | if (!priv->shp_cfg_speed) { | |
970 | dev_err(priv->dev, "Link speed is not known"); | |
971 | return -1; | |
972 | } | |
973 | ||
974 | ret = pm_runtime_get_sync(cpsw->dev); | |
975 | if (ret < 0) { | |
976 | pm_runtime_put_noidle(cpsw->dev); | |
977 | return ret; | |
978 | } | |
979 | ||
980 | bw = qopt->enable ? qopt->idleslope : 0; | |
981 | ret = cpsw_set_fifo_rlimit(priv, fifo, bw); | |
982 | if (ret) { | |
983 | priv->shp_cfg_speed = prev_speed; | |
984 | prev_speed = 0; | |
985 | } | |
986 | ||
987 | if (bw && prev_speed) | |
988 | dev_warn(priv->dev, | |
989 | "Speed was changed, CBS shaper speeds are changed!"); | |
990 | ||
991 | pm_runtime_put_sync(cpsw->dev); | |
992 | return ret; | |
993 | } | |
994 | ||
/* TC_SETUP_QDISC_MQPRIO handler: program the per-priority TX FIFO map
 * register of this slave port from the qdisc's prio->tc map, or restore
 * the default mapping when the offload is removed (qopt.hw == 0).
 * Returns 0 on success or a negative errno.
 */
static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int fifo, num_tc, count, offset;
	struct cpsw_slave *slave;
	u32 tx_prio_map = 0;
	int i, tc, ret;

	num_tc = mqprio->qopt.num_tc;
	if (num_tc > CPSW_TC_NUM)
		return -EINVAL;

	/* only DCB (priority based) mqprio mode is offloadable here */
	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
		return -EINVAL;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (num_tc) {
		/* build the 8 x 4-bit priority->FIFO map: each of the 8
		 * priorities gets the FIFO assigned to its traffic class
		 */
		for (i = 0; i < 8; i++) {
			tc = mqprio->qopt.prio_tc_map[i];
			fifo = cpsw_tc_to_fifo(tc, num_tc);
			tx_prio_map |= fifo << (4 * i);
		}

		netdev_set_num_tc(ndev, num_tc);
		for (i = 0; i < num_tc; i++) {
			count = mqprio->qopt.count[i];
			offset = mqprio->qopt.offset[i];
			netdev_set_tc_queue(ndev, i, count, offset);
		}
	}

	if (!mqprio->qopt.hw) {
		/* restore default configuration */
		netdev_reset_tc(ndev);
		tx_prio_map = TX_PRIORITY_MAPPING;
	}

	/* remembered so cpsw_mqprio_resume() can re-apply after suspend */
	priv->mqprio_hw = mqprio->qopt.hw;

	/* register offset differs between CPSW v1 and v2 */
	offset = cpsw->version == CPSW_VERSION_1 ?
		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	slave_write(slave, tx_prio_map, offset);

	pm_runtime_put_sync(cpsw->dev);

	return 0;
}
1051 | ||
1052 | int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, | |
1053 | void *type_data) | |
1054 | { | |
1055 | switch (type) { | |
1056 | case TC_SETUP_QDISC_CBS: | |
1057 | return cpsw_set_cbs(ndev, type_data); | |
1058 | ||
1059 | case TC_SETUP_QDISC_MQPRIO: | |
1060 | return cpsw_set_mqprio(ndev, type_data); | |
1061 | ||
1062 | default: | |
1063 | return -EOPNOTSUPP; | |
1064 | } | |
1065 | } | |
1066 | ||
1067 | void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) | |
1068 | { | |
1069 | int fifo, bw; | |
1070 | ||
1071 | for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) { | |
1072 | bw = priv->fifo_bw[fifo]; | |
1073 | if (!bw) | |
1074 | continue; | |
1075 | ||
1076 | cpsw_set_fifo_rlimit(priv, fifo, bw); | |
1077 | } | |
1078 | } | |
1079 | ||
/* Re-program the TX priority->FIFO map register after resume, rebuilt
 * from the prio->tc mapping the stack still holds. No-op unless mqprio
 * offload was active before suspend (priv->mqprio_hw).
 */
void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 tx_prio_map = 0;
	int i, tc, fifo;
	u32 tx_prio_rg;

	if (!priv->mqprio_hw)
		return;

	/* 8 priorities, 4 bits each in the map register */
	for (i = 0; i < 8; i++) {
		tc = netdev_get_prio_tc_map(priv->ndev, i);
		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
		tx_prio_map |= fifo << (4 * i);
	}

	/* register offset differs between CPSW v1 and v2 */
	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave_write(slave, tx_prio_map, tx_prio_rg);
}
1101 | ||
/* Pre-fill every RX DMA channel with page-pool backed buffers up to the
 * channel's descriptor count. Returns 0 on success or a negative errno;
 * on failure already-submitted buffers are left for the caller to tear
 * down, only the page that could not be submitted is recycled here.
 */
int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct page_pool *pool;
	struct page *page;
	int ch_buf_num;
	int ch, i, ret;
	dma_addr_t dma;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		pool = cpsw->page_pool[ch];
		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
		for (i = 0; i < ch_buf_num; i++) {
			page = page_pool_dev_alloc_pages(pool);
			if (!page) {
				cpsw_err(priv, ifup, "allocate rx page err\n");
				return -ENOMEM;
			}

			/* stash owning ndev and channel inside the page so
			 * the RX completion/XDP path can recover them
			 */
			xmeta = page_address(page) + CPSW_XMETA_OFFSET;
			xmeta->ndev = priv->ndev;
			xmeta->ch = ch;

			/* packet data starts after the reserved headroom */
			dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
			ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
							    page, dma,
							    cpsw->rx_packet_max,
							    0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit page to channel %d rx, error %d\n",
					 ch, ret);
				page_pool_recycle_direct(pool, page);
				return ret;
			}
		}

		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}
1146 | ||
1147 | static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw, | |
1148 | int size) | |
1149 | { | |
1150 | struct page_pool_params pp_params; | |
1151 | struct page_pool *pool; | |
1152 | ||
1153 | pp_params.order = 0; | |
1154 | pp_params.flags = PP_FLAG_DMA_MAP; | |
1155 | pp_params.pool_size = size; | |
1156 | pp_params.nid = NUMA_NO_NODE; | |
1157 | pp_params.dma_dir = DMA_BIDIRECTIONAL; | |
1158 | pp_params.dev = cpsw->dev; | |
1159 | ||
1160 | pool = page_pool_create(&pp_params); | |
1161 | if (IS_ERR(pool)) | |
1162 | dev_err(cpsw->dev, "cannot create rx page pool\n"); | |
1163 | ||
1164 | return pool; | |
1165 | } | |
1166 | ||
1167 | static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch) | |
1168 | { | |
1169 | struct page_pool *pool; | |
1170 | int ret = 0, pool_size; | |
1171 | ||
1172 | pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch); | |
1173 | pool = cpsw_create_page_pool(cpsw, pool_size); | |
1174 | if (IS_ERR(pool)) | |
1175 | ret = PTR_ERR(pool); | |
1176 | else | |
1177 | cpsw->page_pool[ch] = pool; | |
1178 | ||
1179 | return ret; | |
1180 | } | |
1181 | ||
1182 | static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch) | |
1183 | { | |
1184 | struct cpsw_common *cpsw = priv->cpsw; | |
1185 | struct xdp_rxq_info *rxq; | |
1186 | struct page_pool *pool; | |
1187 | int ret; | |
1188 | ||
1189 | pool = cpsw->page_pool[ch]; | |
1190 | rxq = &priv->xdp_rxq[ch]; | |
1191 | ||
1192 | ret = xdp_rxq_info_reg(rxq, priv->ndev, ch); | |
1193 | if (ret) | |
1194 | return ret; | |
1195 | ||
1196 | ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); | |
1197 | if (ret) | |
1198 | xdp_rxq_info_unreg(rxq); | |
1199 | ||
1200 | return ret; | |
1201 | } | |
1202 | ||
1203 | static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch) | |
1204 | { | |
1205 | struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch]; | |
1206 | ||
1207 | if (!xdp_rxq_info_is_reg(rxq)) | |
1208 | return; | |
1209 | ||
1210 | xdp_rxq_info_unreg(rxq); | |
1211 | } | |
1212 | ||
1213 | void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw) | |
1214 | { | |
1215 | struct net_device *ndev; | |
1216 | int i, ch; | |
1217 | ||
1218 | for (ch = 0; ch < cpsw->rx_ch_num; ch++) { | |
1219 | for (i = 0; i < cpsw->data.slaves; i++) { | |
1220 | ndev = cpsw->slaves[i].ndev; | |
1221 | if (!ndev) | |
1222 | continue; | |
1223 | ||
1224 | cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch); | |
1225 | } | |
1226 | ||
1227 | page_pool_destroy(cpsw->page_pool[ch]); | |
1228 | cpsw->page_pool[ch] = NULL; | |
1229 | } | |
1230 | } | |
1231 | ||
/* Set up all XDP RX state: one page pool per RX channel, plus an
 * xdp_rxq per slave ndev on each channel. On any failure everything
 * created so far is torn down via cpsw_destroy_xdp_rxqs().
 * Returns 0 on success or a negative errno.
 */
int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ret = cpsw_create_rx_pool(cpsw, ch);
		if (ret)
			goto err_cleanup;

		/* using same page pool is allowed as no running rx handlers
		 * simultaneously for both ndevs
		 */
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
			if (ret)
				goto err_cleanup;
		}
	}

	return 0;

err_cleanup:
	/* destroy handles the partially-built state: it skips unregistered
	 * rxqs and NULL pools
	 */
	cpsw_destroy_xdp_rxqs(cpsw);

	return ret;
}
1263 | ||
1264 | static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf) | |
1265 | { | |
1266 | struct bpf_prog *prog = bpf->prog; | |
1267 | ||
1268 | if (!priv->xdpi.prog && !prog) | |
1269 | return 0; | |
1270 | ||
1271 | if (!xdp_attachment_flags_ok(&priv->xdpi, bpf)) | |
1272 | return -EBUSY; | |
1273 | ||
1274 | WRITE_ONCE(priv->xdp_prog, prog); | |
1275 | ||
1276 | xdp_attachment_setup(&priv->xdpi, bpf); | |
1277 | ||
1278 | return 0; | |
1279 | } | |
1280 | ||
1281 | int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) | |
1282 | { | |
1283 | struct cpsw_priv *priv = netdev_priv(ndev); | |
1284 | ||
1285 | switch (bpf->command) { | |
1286 | case XDP_SETUP_PROG: | |
1287 | return cpsw_xdp_prog_setup(priv, bpf); | |
1288 | ||
1289 | case XDP_QUERY_PROG: | |
1290 | return xdp_attachment_query(&priv->xdpi, bpf); | |
1291 | ||
1292 | default: | |
1293 | return -EINVAL; | |
1294 | } | |
1295 | } | |
1296 | ||
/* Queue an XDP frame for transmit on TX channel 0.
 * @page: backing page-pool page for the XDP_TX path (already DMA-mapped
 *        by the pool), or NULL for frames submitted by virtual address
 *        (the redirect/xmit path).
 * @port: directed-port id passed through to CPDMA.
 * On any error the frame is returned to XDP via
 * xdp_return_frame_rx_napi() and tx_dropped is incremented; the caller
 * must not touch the frame afterwards.
 */
int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
		      struct page *page, int port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct cpdma_chan *txch;
	dma_addr_t dma;
	int ret;

	/* completion metadata lives at a fixed offset inside the frame */
	xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
	xmeta->ndev = priv->ndev;
	xmeta->ch = 0;
	txch = cpsw->txv[0].ch;

	if (page) {
		/* derive the payload DMA address from the page's mapping:
		 * skip the frame headroom and the xdp_frame header itself
		 */
		dma = page_pool_get_dma_addr(page);
		dma += xdpf->headroom + sizeof(struct xdp_frame);
		ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
					       dma, xdpf->len, port);
	} else {
		/* xmeta is stored in the headroom; reject frames whose
		 * headroom is too small to hold it
		 */
		if (sizeof(*xmeta) > xdpf->headroom) {
			xdp_return_frame_rx_napi(xdpf);
			return -EINVAL;
		}

		ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
					xdpf->data, xdpf->len, port);
	}

	if (ret) {
		priv->ndev->stats.tx_dropped++;
		xdp_return_frame_rx_napi(xdpf);
	}

	return ret;
}
1333 | ||
/* Run the attached XDP program (if any) on a received buffer.
 * Returns CPSW_XDP_PASS when the stack should consume the packet, or
 * CPSW_XDP_CONSUMED when XDP handled it (TX/redirect) or dropped it.
 * On drop the backing @page is recycled to the channel's pool here;
 * on TX/redirect ownership passes to the respective submit path.
 */
int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
		 struct page *page, int port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *ndev = priv->ndev;
	int ret = CPSW_XDP_CONSUMED;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	u32 act;

	/* RCU protects the program against teardown while it runs */
	rcu_read_lock();

	/* READ_ONCE pairs with WRITE_ONCE in cpsw_xdp_prog_setup() */
	prog = READ_ONCE(priv->xdp_prog);
	if (!prog) {
		ret = CPSW_XDP_PASS;
		goto out;
	}

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		ret = CPSW_XDP_PASS;
		break;
	case XDP_TX:
		xdpf = convert_to_xdp_frame(xdp);
		if (unlikely(!xdpf))
			goto drop;

		/* on failure the frame is already freed inside */
		cpsw_xdp_tx_frame(priv, xdpf, page, port);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(ndev, xdp, prog))
			goto drop;

		/* Have to flush here, per packet, instead of doing it in bulk
		 * at the end of the napi handler. The RX devices on this
		 * particular hardware is sharing a common queue, so the
		 * incoming device might change per packet.
		 */
		xdp_do_flush_map();
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		/* fall through -- handle aborts by dropping packet */
	case XDP_DROP:
		goto drop;
	}
out:
	rcu_read_unlock();
	return ret;
drop:
	rcu_read_unlock();
	page_pool_recycle_direct(cpsw->page_pool[ch], page);
	return ret;
}