]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/stmicro/stmmac/ring_mode.c
Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / stmicro / stmmac / ring_mode.c
CommitLineData
4fa9c49f 1// SPDX-License-Identifier: GPL-2.0-only
286a8372
GC
2/*******************************************************************************
3 Specialised functions for managing Ring mode
4
5 Copyright(C) 2011 STMicroelectronics Ltd
6
7 It defines all the functions used to handle the normal/enhanced
8 descriptors in case of the DMA is configured to work in chained or
9 in ring mode.
10
286a8372
GC
11
12 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
13*******************************************************************************/
14
15#include "stmmac.h"
16
/* Map the linear (non-paged) part of a jumbo skb onto one or two TX
 * descriptors, using DES2/DES3 as split buffer pointers (ring mode).
 *
 * @p:    opaque pointer to the TX queue (struct stmmac_tx_queue *)
 * @skb:  socket buffer whose head data is being transmitted
 * @csum: checksum insertion flag forwarded to the descriptor setup
 *
 * Returns the index of the last descriptor used, or -1 on DMA mapping
 * failure.
 */
static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
	unsigned int nopaged_len = skb_headlen(skb);
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->cur_tx;
	unsigned int bmax, len, des2;
	struct dma_desc *desc;

	/* Extended descriptors embed a dma_desc as their first member, so
	 * the cast below is how the basic descriptor is reached.
	 */
	if (priv->extend_desc)
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else
		desc = tx_q->dma_tx + entry;

	/* Per-descriptor buffer limit depends on the descriptor flavour. */
	if (priv->plat->enh_desc)
		bmax = BUF_SIZE_8KiB;
	else
		bmax = BUF_SIZE_2KiB;

	/* Remainder carried by the second descriptor; only meaningful
	 * (and only used) in the nopaged_len > BUF_SIZE_8KiB branch below.
	 */
	len = nopaged_len - bmax;

	if (nopaged_len > BUF_SIZE_8KiB) {
		/* First segment: bmax bytes on this descriptor, with DES3
		 * pointing at the second half of the mapped area.
		 */
		des2 = dma_map_single(priv->device, skb->data, bmax,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;

		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = bmax;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;

		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
				STMMAC_RING_MODE, 0, false, skb->len);
		/* The skb is attached to the LAST descriptor only (below);
		 * this slot carries no skb pointer of its own.
		 */
		tx_q->tx_skbuff[entry] = NULL;
		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

		if (priv->extend_desc)
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			desc = tx_q->dma_tx + entry;

		/* Second segment: the remaining len bytes.
		 * NOTE(review): if this second mapping fails, the first
		 * segment mapped above is not unmapped here — presumably
		 * the caller's error path reclaims it; confirm against
		 * stmmac_main.c.
		 */
		des2 = dma_map_single(priv->device, skb->data + bmax, len,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;

		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
				STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
				skb->len);
	} else {
		/* Head data fits in a single descriptor pair (DES2+DES3). */
		des2 = dma_map_single(priv->device, skb->data,
				      nopaged_len, DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;
		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
				STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
				skb->len);
	}

	tx_q->cur_tx = entry;

	return entry;
}
93
2c520b1c 94static unsigned int is_jumbo_frm(int len, int enh_desc)
286a8372
GC
95{
96 unsigned int ret = 0;
97
98 if (len >= BUF_SIZE_4KiB)
99 ret = 1;
100
101 return ret;
102}
103
2c520b1c 104static void refill_desc3(void *priv_ptr, struct dma_desc *p)
286a8372 105{
223a960c
AK
106 struct stmmac_rx_queue *rx_q = priv_ptr;
107 struct stmmac_priv *priv = rx_q->priv_data;
891434b1 108
29896a67 109 /* Fill DES3 in case of RING mode */
223a960c 110 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
f8be0d78 111 p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
286a8372
GC
112}
113
4a7d666a 114/* In ring mode we need to fill the desc3 because it is used as buffer */
2c520b1c 115static void init_desc3(struct dma_desc *p)
286a8372 116{
f8be0d78 117 p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
286a8372
GC
118}
119
2c520b1c 120static void clean_desc3(void *priv_ptr, struct dma_desc *p)
286a8372 121{
ce736788
JP
122 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
123 struct stmmac_priv *priv = tx_q->priv_data;
124 unsigned int entry = tx_q->dirty_tx;
96951366
GC
125
126 /* des3 is only used for jumbo frames tx or time stamping */
ce736788
JP
127 if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
128 (tx_q->tx_skbuff_dma[entry].last_segment &&
96951366 129 !priv->extend_desc && priv->hwts_tx_en)))
286a8372
GC
130 p->des3 = 0;
131}
132
2c520b1c 133static int set_16kib_bfsize(int mtu)
286a8372
GC
134{
135 int ret = 0;
8137b6ef 136 if (unlikely(mtu > BUF_SIZE_8KiB))
286a8372
GC
137 ret = BUF_SIZE_16KiB;
138 return ret;
139}
140
/* Operations hooked into the core driver when the DMA is configured to
 * work in ring mode (see the chain-mode counterpart in chain_mode.c).
 */
const struct stmmac_mode_ops ring_mode_ops = {
	.is_jumbo_frm = is_jumbo_frm,
	.jumbo_frm = jumbo_frm,
	.refill_desc3 = refill_desc3,
	.init_desc3 = init_desc3,
	.clean_desc3 = clean_desc3,
	.set_16kib_bfsize = set_16kib_bfsize,
};