/* drivers/net/ethernet/stmicro/stmmac/ring_mode.c */
/*******************************************************************************
  Specialised functions for managing Ring mode

  Copyright(C) 2011 STMicroelectronics Ltd

  It defines all the functions used to handle the normal/enhanced
  descriptors in case of the DMA is configured to work in chained or
  in ring mode.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
24
#include "stmmac.h"
26
/* jumbo_frm - map the linear head of a jumbo skb onto ring-mode descriptors
 * @p: TX queue pointer (struct stmmac_tx_queue *, passed as void * through
 *     the mode_ops interface)
 * @skb: socket buffer to transmit
 * @csum: checksum-insertion flag, forwarded to stmmac_prepare_tx_desc()
 *
 * In ring mode a single descriptor addresses two buffer halves via DES2 and
 * DES3, so a head bigger than one descriptor's capacity is split across two
 * consecutive descriptors.
 *
 * Returns the ring index of the last descriptor used, or -1 if a DMA
 * mapping fails.  NOTE(review): when the second segment's mapping fails,
 * the first segment's mapping is not unwound here — presumably the caller
 * performs the cleanup; confirm against the TX path in stmmac_main.c.
 */
static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
	unsigned int nopaged_len = skb_headlen(skb);
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->cur_tx;
	unsigned int bmax, len, des2;
	struct dma_desc *desc;

	/* Extended and basic descriptors live in different rings. */
	if (priv->extend_desc)
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else
		desc = tx_q->dma_tx + entry;

	/* Per-descriptor buffer capacity: enhanced descriptors take up to
	 * 8KiB, normal descriptors up to 2KiB.
	 */
	if (priv->plat->enh_desc)
		bmax = BUF_SIZE_8KiB;
	else
		bmax = BUF_SIZE_2KiB;

	/* Bytes left over for the second descriptor; only meaningful on the
	 * split path below (when nopaged_len > bmax).
	 */
	len = nopaged_len - bmax;

	if (nopaged_len > BUF_SIZE_8KiB) {

		/* First descriptor: the first bmax bytes of the head. */
		des2 = dma_map_single(priv->device, skb->data, bmax,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;

		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = bmax;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;

		/* DES3 addresses the second half of this buffer. */
		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
				STMMAC_RING_MODE, 0, false, skb->len);
		/* The skb is only attached to the last descriptor. */
		tx_q->tx_skbuff[entry] = NULL;
		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

		if (priv->extend_desc)
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			desc = tx_q->dma_tx + entry;

		/* Second descriptor: the remaining len bytes. */
		des2 = dma_map_single(priv->device, skb->data + bmax, len,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;

		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
				STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
				skb->len);
	} else {
		/* The whole head fits in one descriptor (DES2 + DES3). */
		des2 = dma_map_single(priv->device, skb->data,
				      nopaged_len, DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;
		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
				STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
				skb->len);
	}

	tx_q->cur_tx = entry;

	return entry;
}
103
2c520b1c 104static unsigned int is_jumbo_frm(int len, int enh_desc)
286a8372
GC
105{
106 unsigned int ret = 0;
107
108 if (len >= BUF_SIZE_4KiB)
109 ret = 1;
110
111 return ret;
112}
113
2c520b1c 114static void refill_desc3(void *priv_ptr, struct dma_desc *p)
286a8372 115{
223a960c
AK
116 struct stmmac_rx_queue *rx_q = priv_ptr;
117 struct stmmac_priv *priv = rx_q->priv_data;
891434b1 118
29896a67 119 /* Fill DES3 in case of RING mode */
223a960c 120 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
f8be0d78 121 p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
286a8372
GC
122}
123
4a7d666a 124/* In ring mode we need to fill the desc3 because it is used as buffer */
2c520b1c 125static void init_desc3(struct dma_desc *p)
286a8372 126{
f8be0d78 127 p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
286a8372
GC
128}
129
2c520b1c 130static void clean_desc3(void *priv_ptr, struct dma_desc *p)
286a8372 131{
ce736788
JP
132 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
133 struct stmmac_priv *priv = tx_q->priv_data;
134 unsigned int entry = tx_q->dirty_tx;
96951366
GC
135
136 /* des3 is only used for jumbo frames tx or time stamping */
ce736788
JP
137 if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
138 (tx_q->tx_skbuff_dma[entry].last_segment &&
96951366 139 !priv->extend_desc && priv->hwts_tx_en)))
286a8372
GC
140 p->des3 = 0;
141}
142
2c520b1c 143static int set_16kib_bfsize(int mtu)
286a8372
GC
144{
145 int ret = 0;
8137b6ef 146 if (unlikely(mtu > BUF_SIZE_8KiB))
286a8372
GC
147 ret = BUF_SIZE_16KiB;
148 return ret;
149}
150
/* Ring-mode callback table plugged into the stmmac core via
 * struct stmmac_mode_ops.
 */
const struct stmmac_mode_ops ring_mode_ops = {
	.is_jumbo_frm = is_jumbo_frm,
	.jumbo_frm = jumbo_frm,
	.refill_desc3 = refill_desc3,
	.init_desc3 = init_desc3,
	.clean_desc3 = clean_desc3,
	.set_16kib_bfsize = set_16kib_bfsize,
};