]> git.proxmox.com Git - mirror_qemu.git/blob - hw/net/cadence_gem.c
hw/net/cadence_gem: use FIELD to describe NWCFG register fields
[mirror_qemu.git] / hw / net / cadence_gem.c
1 /*
2 * QEMU Cadence GEM emulation
3 *
4 * Copyright (c) 2011 Xilinx, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26 #include <zlib.h> /* For crc32 */
27
28 #include "hw/irq.h"
29 #include "hw/net/cadence_gem.h"
30 #include "hw/qdev-properties.h"
31 #include "hw/registerfields.h"
32 #include "migration/vmstate.h"
33 #include "qapi/error.h"
34 #include "qemu/log.h"
35 #include "qemu/module.h"
36 #include "sysemu/dma.h"
37 #include "net/checksum.h"
38 #include "net/eth.h"
39
40 #define CADENCE_GEM_ERR_DEBUG 0
41 #define DB_PRINT(...) do {\
42 if (CADENCE_GEM_ERR_DEBUG) { \
43 qemu_log(": %s: ", __func__); \
44 qemu_log(__VA_ARGS__); \
45 } \
46 } while (0)
47
48 REG32(NWCTRL, 0x0) /* Network Control reg */
49 FIELD(NWCTRL, LOOPBACK , 0, 1)
50 FIELD(NWCTRL, LOOPBACK_LOCAL , 1, 1)
51 FIELD(NWCTRL, ENABLE_RECEIVE, 2, 1)
52 FIELD(NWCTRL, ENABLE_TRANSMIT, 3, 1)
53 FIELD(NWCTRL, MAN_PORT_EN , 4, 1)
54 FIELD(NWCTRL, CLEAR_ALL_STATS_REGS , 5, 1)
55 FIELD(NWCTRL, INC_ALL_STATS_REGS, 6, 1)
56 FIELD(NWCTRL, STATS_WRITE_EN, 7, 1)
57 FIELD(NWCTRL, BACK_PRESSURE, 8, 1)
58 FIELD(NWCTRL, TRANSMIT_START , 9, 1)
59 FIELD(NWCTRL, TRANSMIT_HALT, 10, 1)
60 FIELD(NWCTRL, TX_PAUSE_FRAME_RE, 11, 1)
61 FIELD(NWCTRL, TX_PAUSE_FRAME_ZE, 12, 1)
62 FIELD(NWCTRL, STATS_TAKE_SNAP, 13, 1)
63 FIELD(NWCTRL, STATS_READ_SNAP, 14, 1)
64 FIELD(NWCTRL, STORE_RX_TS, 15, 1)
65 FIELD(NWCTRL, PFC_ENABLE, 16, 1)
66 FIELD(NWCTRL, PFC_PRIO_BASED, 17, 1)
67 FIELD(NWCTRL, FLUSH_RX_PKT_PCLK , 18, 1)
68 FIELD(NWCTRL, TX_LPI_EN, 19, 1)
69 FIELD(NWCTRL, PTP_UNICAST_ENA, 20, 1)
70 FIELD(NWCTRL, ALT_SGMII_MODE, 21, 1)
71 FIELD(NWCTRL, STORE_UDP_OFFSET, 22, 1)
72 FIELD(NWCTRL, EXT_TSU_PORT_EN, 23, 1)
73 FIELD(NWCTRL, ONE_STEP_SYNC_MO, 24, 1)
74 FIELD(NWCTRL, PFC_CTRL , 25, 1)
75 FIELD(NWCTRL, EXT_RXQ_SEL_EN , 26, 1)
76 FIELD(NWCTRL, OSS_CORRECTION_FIELD, 27, 1)
77 FIELD(NWCTRL, SEL_MII_ON_RGMII, 28, 1)
78 FIELD(NWCTRL, TWO_PT_FIVE_GIG, 29, 1)
79 FIELD(NWCTRL, IFG_EATS_QAV_CREDIT, 30, 1)
80
81 REG32(NWCFG, 0x4) /* Network Config reg */
82 FIELD(NWCFG, SPEED, 0, 1)
83 FIELD(NWCFG, FULL_DUPLEX, 1, 1)
84 FIELD(NWCFG, DISCARD_NON_VLAN_FRAMES, 2, 1)
85 FIELD(NWCFG, JUMBO_FRAMES, 3, 1)
86 FIELD(NWCFG, PROMISC, 4, 1)
87 FIELD(NWCFG, NO_BROADCAST, 5, 1)
88 FIELD(NWCFG, MULTICAST_HASH_EN, 6, 1)
89 FIELD(NWCFG, UNICAST_HASH_EN, 7, 1)
90 FIELD(NWCFG, RECV_1536_BYTE_FRAMES, 8, 1)
91 FIELD(NWCFG, EXTERNAL_ADDR_MATCH_EN, 9, 1)
92 FIELD(NWCFG, GIGABIT_MODE_ENABLE, 10, 1)
93 FIELD(NWCFG, PCS_SELECT, 11, 1)
94 FIELD(NWCFG, RETRY_TEST, 12, 1)
95 FIELD(NWCFG, PAUSE_ENABLE, 13, 1)
96 FIELD(NWCFG, RECV_BUF_OFFSET, 14, 2)
97 FIELD(NWCFG, LEN_ERR_DISCARD, 16, 1)
98 FIELD(NWCFG, FCS_REMOVE, 17, 1)
99 FIELD(NWCFG, MDC_CLOCK_DIV, 18, 3)
100 FIELD(NWCFG, DATA_BUS_WIDTH, 21, 2)
101 FIELD(NWCFG, DISABLE_COPY_PAUSE_FRAMES, 23, 1)
102 FIELD(NWCFG, RECV_CSUM_OFFLOAD_EN, 24, 1)
103 FIELD(NWCFG, EN_HALF_DUPLEX_RX, 25, 1)
104 FIELD(NWCFG, IGNORE_RX_FCS, 26, 1)
105 FIELD(NWCFG, SGMII_MODE_ENABLE, 27, 1)
106 FIELD(NWCFG, IPG_STRETCH_ENABLE, 28, 1)
107 FIELD(NWCFG, NSP_ACCEPT, 29, 1)
108 FIELD(NWCFG, IGNORE_IPG_RX_ER, 30, 1)
109 FIELD(NWCFG, UNI_DIRECTION_ENABLE, 31, 1)
110
111 REG32(NWSTATUS, 0x8) /* Network Status reg */
112 REG32(USERIO, 0xc) /* User IO reg */
113 REG32(DMACFG, 0x10) /* DMA Control reg */
114 REG32(TXSTATUS, 0x14) /* TX Status reg */
115 REG32(RXQBASE, 0x18) /* RX Q Base address reg */
116 REG32(TXQBASE, 0x1c) /* TX Q Base address reg */
117 REG32(RXSTATUS, 0x20) /* RX Status reg */
118 REG32(ISR, 0x24) /* Interrupt Status reg */
119 REG32(IER, 0x28) /* Interrupt Enable reg */
120 REG32(IDR, 0x2c) /* Interrupt Disable reg */
121 REG32(IMR, 0x30) /* Interrupt Mask reg */
122 REG32(PHYMNTNC, 0x34) /* Phy Maintenance reg */
123 REG32(RXPAUSE, 0x38) /* RX Pause Time reg */
124 REG32(TXPAUSE, 0x3c) /* TX Pause Time reg */
125 REG32(TXPARTIALSF, 0x40) /* TX Partial Store and Forward */
126 REG32(RXPARTIALSF, 0x44) /* RX Partial Store and Forward */
127 REG32(JUMBO_MAX_LEN, 0x48) /* Max Jumbo Frame Size */
128 REG32(HASHLO, 0x80) /* Hash Low address reg */
129 REG32(HASHHI, 0x84) /* Hash High address reg */
130 REG32(SPADDR1LO, 0x88) /* Specific addr 1 low reg */
131 REG32(SPADDR1HI, 0x8c) /* Specific addr 1 high reg */
132 REG32(SPADDR2LO, 0x90) /* Specific addr 2 low reg */
133 REG32(SPADDR2HI, 0x94) /* Specific addr 2 high reg */
134 REG32(SPADDR3LO, 0x98) /* Specific addr 3 low reg */
135 REG32(SPADDR3HI, 0x9c) /* Specific addr 3 high reg */
136 REG32(SPADDR4LO, 0xa0) /* Specific addr 4 low reg */
137 REG32(SPADDR4HI, 0xa4) /* Specific addr 4 high reg */
138 REG32(TIDMATCH1, 0xa8) /* Type ID1 Match reg */
139 REG32(TIDMATCH2, 0xac) /* Type ID2 Match reg */
140 REG32(TIDMATCH3, 0xb0) /* Type ID3 Match reg */
141 REG32(TIDMATCH4, 0xb4) /* Type ID4 Match reg */
142 REG32(WOLAN, 0xb8) /* Wake on LAN reg */
143 REG32(IPGSTRETCH, 0xbc) /* IPG Stretch reg */
144 REG32(SVLAN, 0xc0) /* Stacked VLAN reg */
145 REG32(MODID, 0xfc) /* Module ID reg */
146 REG32(OCTTXLO, 0x100) /* Octects transmitted Low reg */
147 REG32(OCTTXHI, 0x104) /* Octects transmitted High reg */
148 REG32(TXCNT, 0x108) /* Error-free Frames transmitted */
149 REG32(TXBCNT, 0x10c) /* Error-free Broadcast Frames */
150 REG32(TXMCNT, 0x110) /* Error-free Multicast Frame */
151 REG32(TXPAUSECNT, 0x114) /* Pause Frames Transmitted */
152 REG32(TX64CNT, 0x118) /* Error-free 64 TX */
153 REG32(TX65CNT, 0x11c) /* Error-free 65-127 TX */
154 REG32(TX128CNT, 0x120) /* Error-free 128-255 TX */
155 REG32(TX256CNT, 0x124) /* Error-free 256-511 */
156 REG32(TX512CNT, 0x128) /* Error-free 512-1023 TX */
157 REG32(TX1024CNT, 0x12c) /* Error-free 1024-1518 TX */
158 REG32(TX1519CNT, 0x130) /* Error-free larger than 1519 TX */
159 REG32(TXURUNCNT, 0x134) /* TX under run error counter */
160 REG32(SINGLECOLLCNT, 0x138) /* Single Collision Frames */
161 REG32(MULTCOLLCNT, 0x13c) /* Multiple Collision Frames */
162 REG32(EXCESSCOLLCNT, 0x140) /* Excessive Collision Frames */
163 REG32(LATECOLLCNT, 0x144) /* Late Collision Frames */
164 REG32(DEFERTXCNT, 0x148) /* Deferred Transmission Frames */
165 REG32(CSENSECNT, 0x14c) /* Carrier Sense Error Counter */
166 REG32(OCTRXLO, 0x150) /* Octects Received register Low */
167 REG32(OCTRXHI, 0x154) /* Octects Received register High */
168 REG32(RXCNT, 0x158) /* Error-free Frames Received */
169 REG32(RXBROADCNT, 0x15c) /* Error-free Broadcast Frames RX */
170 REG32(RXMULTICNT, 0x160) /* Error-free Multicast Frames RX */
171 REG32(RXPAUSECNT, 0x164) /* Pause Frames Received Counter */
172 REG32(RX64CNT, 0x168) /* Error-free 64 byte Frames RX */
173 REG32(RX65CNT, 0x16c) /* Error-free 65-127B Frames RX */
174 REG32(RX128CNT, 0x170) /* Error-free 128-255B Frames RX */
175 REG32(RX256CNT, 0x174) /* Error-free 256-512B Frames RX */
176 REG32(RX512CNT, 0x178) /* Error-free 512-1023B Frames RX */
177 REG32(RX1024CNT, 0x17c) /* Error-free 1024-1518B Frames RX */
178 REG32(RX1519CNT, 0x180) /* Error-free 1519-max Frames RX */
179 REG32(RXUNDERCNT, 0x184) /* Undersize Frames Received */
180 REG32(RXOVERCNT, 0x188) /* Oversize Frames Received */
181 REG32(RXJABCNT, 0x18c) /* Jabbers Received Counter */
182 REG32(RXFCSCNT, 0x190) /* Frame Check seq. Error Counter */
183 REG32(RXLENERRCNT, 0x194) /* Length Field Error Counter */
184 REG32(RXSYMERRCNT, 0x198) /* Symbol Error Counter */
185 REG32(RXALIGNERRCNT, 0x19c) /* Alignment Error Counter */
186 REG32(RXRSCERRCNT, 0x1a0) /* Receive Resource Error Counter */
187 REG32(RXORUNCNT, 0x1a4) /* Receive Overrun Counter */
188 REG32(RXIPCSERRCNT, 0x1a8) /* IP header Checksum Err Counter */
189 REG32(RXTCPCCNT, 0x1ac) /* TCP Checksum Error Counter */
190 REG32(RXUDPCCNT, 0x1b0) /* UDP Checksum Error Counter */
191
192 REG32(1588S, 0x1d0) /* 1588 Timer Seconds */
193 REG32(1588NS, 0x1d4) /* 1588 Timer Nanoseconds */
194 REG32(1588ADJ, 0x1d8) /* 1588 Timer Adjust */
195 REG32(1588INC, 0x1dc) /* 1588 Timer Increment */
196 REG32(PTPETXS, 0x1e0) /* PTP Event Frame Transmitted (s) */
197 REG32(PTPETXNS, 0x1e4) /* PTP Event Frame Transmitted (ns) */
198 REG32(PTPERXS, 0x1e8) /* PTP Event Frame Received (s) */
199 REG32(PTPERXNS, 0x1ec) /* PTP Event Frame Received (ns) */
200 REG32(PTPPTXS, 0x1e0) /* PTP Peer Frame Transmitted (s) */
201 REG32(PTPPTXNS, 0x1e4) /* PTP Peer Frame Transmitted (ns) */
202 REG32(PTPPRXS, 0x1e8) /* PTP Peer Frame Received (s) */
203 REG32(PTPPRXNS, 0x1ec) /* PTP Peer Frame Received (ns) */
204
205 /* Design Configuration Registers */
206 REG32(DESCONF, 0x280)
207 REG32(DESCONF2, 0x284)
208 REG32(DESCONF3, 0x288)
209 REG32(DESCONF4, 0x28c)
210 REG32(DESCONF5, 0x290)
211 REG32(DESCONF6, 0x294)
212 #define GEM_DESCONF6_64B_MASK (1U << 23)
213 REG32(DESCONF7, 0x298)
214
215 REG32(INT_Q1_STATUS, 0x400)
216 REG32(INT_Q1_MASK, 0x640)
217
218 REG32(TRANSMIT_Q1_PTR, 0x440)
219 REG32(TRANSMIT_Q7_PTR, 0x458)
220
221 REG32(RECEIVE_Q1_PTR, 0x480)
222 REG32(RECEIVE_Q7_PTR, 0x498)
223
224 REG32(TBQPH, 0x4c8)
225 REG32(RBQPH, 0x4d4)
226
227 REG32(INT_Q1_ENABLE, 0x600)
228 REG32(INT_Q7_ENABLE, 0x618)
229
230 REG32(INT_Q1_DISABLE, 0x620)
231 REG32(INT_Q7_DISABLE, 0x638)
232
233 REG32(SCREENING_TYPE1_REG0, 0x500)
234 FIELD(SCREENING_TYPE1_REG0, QUEUE_NUM, 0, 4)
235 FIELD(SCREENING_TYPE1_REG0, DSTC_MATCH, 4, 8)
236 FIELD(SCREENING_TYPE1_REG0, UDP_PORT_MATCH, 12, 16)
237 FIELD(SCREENING_TYPE1_REG0, DSTC_ENABLE, 28, 1)
238 FIELD(SCREENING_TYPE1_REG0, UDP_PORT_MATCH_EN, 29, 1)
239 FIELD(SCREENING_TYPE1_REG0, DROP_ON_MATCH, 30, 1)
240
241 REG32(SCREENING_TYPE2_REG0, 0x540)
242 FIELD(SCREENING_TYPE2_REG0, QUEUE_NUM, 0, 4)
243 FIELD(SCREENING_TYPE2_REG0, VLAN_PRIORITY, 4, 3)
244 FIELD(SCREENING_TYPE2_REG0, VLAN_ENABLE, 8, 1)
245 FIELD(SCREENING_TYPE2_REG0, ETHERTYPE_REG_INDEX, 9, 3)
246 FIELD(SCREENING_TYPE2_REG0, ETHERTYPE_ENABLE, 12, 1)
247 FIELD(SCREENING_TYPE2_REG0, COMPARE_A, 13, 5)
248 FIELD(SCREENING_TYPE2_REG0, COMPARE_A_ENABLE, 18, 1)
249 FIELD(SCREENING_TYPE2_REG0, COMPARE_B, 19, 5)
250 FIELD(SCREENING_TYPE2_REG0, COMPARE_B_ENABLE, 24, 1)
251 FIELD(SCREENING_TYPE2_REG0, COMPARE_C, 25, 5)
252 FIELD(SCREENING_TYPE2_REG0, COMPARE_C_ENABLE, 30, 1)
253 FIELD(SCREENING_TYPE2_REG0, DROP_ON_MATCH, 31, 1)
254
255 REG32(SCREENING_TYPE2_ETHERTYPE_REG0, 0x6e0)
256
257 REG32(TYPE2_COMPARE_0_WORD_0, 0x700)
258 FIELD(TYPE2_COMPARE_0_WORD_0, MASK_VALUE, 0, 16)
259 FIELD(TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE, 16, 16)
260
261 REG32(TYPE2_COMPARE_0_WORD_1, 0x704)
262 FIELD(TYPE2_COMPARE_0_WORD_1, OFFSET_VALUE, 0, 7)
263 FIELD(TYPE2_COMPARE_0_WORD_1, COMPARE_OFFSET, 7, 2)
264 FIELD(TYPE2_COMPARE_0_WORD_1, DISABLE_MASK, 9, 1)
265 FIELD(TYPE2_COMPARE_0_WORD_1, COMPARE_VLAN_ID, 10, 1)
266
267 /*****************************************/
268 #define GEM_DMACFG_ADDR_64B (1U << 30)
269 #define GEM_DMACFG_TX_BD_EXT (1U << 29)
270 #define GEM_DMACFG_RX_BD_EXT (1U << 28)
271 #define GEM_DMACFG_RBUFSZ_M 0x00FF0000 /* DMA RX Buffer Size mask */
272 #define GEM_DMACFG_RBUFSZ_S 16 /* DMA RX Buffer Size shift */
273 #define GEM_DMACFG_RBUFSZ_MUL 64 /* DMA RX Buffer Size multiplier */
274 #define GEM_DMACFG_TXCSUM_OFFL 0x00000800 /* Transmit checksum offload */
275
276 #define GEM_TXSTATUS_TXCMPL 0x00000020 /* Transmit Complete */
277 #define GEM_TXSTATUS_USED 0x00000001 /* sw owned descriptor encountered */
278
279 #define GEM_RXSTATUS_FRMRCVD 0x00000002 /* Frame received */
280 #define GEM_RXSTATUS_NOBUF 0x00000001 /* Buffer unavailable */
281
282 /* GEM_ISR GEM_IER GEM_IDR GEM_IMR */
283 #define GEM_INT_TXCMPL 0x00000080 /* Transmit Complete */
284 #define GEM_INT_AMBA_ERR 0x00000040
285 #define GEM_INT_TXUSED 0x00000008
286 #define GEM_INT_RXUSED 0x00000004
287 #define GEM_INT_RXCMPL 0x00000002
288
289 #define GEM_PHYMNTNC_OP_R 0x20000000 /* read operation */
290 #define GEM_PHYMNTNC_OP_W 0x10000000 /* write operation */
291 #define GEM_PHYMNTNC_ADDR 0x0F800000 /* Address bits */
292 #define GEM_PHYMNTNC_ADDR_SHFT 23
293 #define GEM_PHYMNTNC_REG 0x007C0000 /* register bits */
294 #define GEM_PHYMNTNC_REG_SHIFT 18
295
296 /* Marvell PHY definitions */
297 #define BOARD_PHY_ADDRESS 0 /* PHY address we will emulate a device at */
298
299 #define PHY_REG_CONTROL 0
300 #define PHY_REG_STATUS 1
301 #define PHY_REG_PHYID1 2
302 #define PHY_REG_PHYID2 3
303 #define PHY_REG_ANEGADV 4
304 #define PHY_REG_LINKPABIL 5
305 #define PHY_REG_ANEGEXP 6
306 #define PHY_REG_NEXTP 7
307 #define PHY_REG_LINKPNEXTP 8
308 #define PHY_REG_100BTCTRL 9
309 #define PHY_REG_1000BTSTAT 10
310 #define PHY_REG_EXTSTAT 15
311 #define PHY_REG_PHYSPCFC_CTL 16
312 #define PHY_REG_PHYSPCFC_ST 17
313 #define PHY_REG_INT_EN 18
314 #define PHY_REG_INT_ST 19
315 #define PHY_REG_EXT_PHYSPCFC_CTL 20
316 #define PHY_REG_RXERR 21
317 #define PHY_REG_EACD 22
318 #define PHY_REG_LED 24
319 #define PHY_REG_LED_OVRD 25
320 #define PHY_REG_EXT_PHYSPCFC_CTL2 26
321 #define PHY_REG_EXT_PHYSPCFC_ST 27
322 #define PHY_REG_CABLE_DIAG 28
323
324 #define PHY_REG_CONTROL_RST 0x8000
325 #define PHY_REG_CONTROL_LOOP 0x4000
326 #define PHY_REG_CONTROL_ANEG 0x1000
327 #define PHY_REG_CONTROL_ANRESTART 0x0200
328
329 #define PHY_REG_STATUS_LINK 0x0004
330 #define PHY_REG_STATUS_ANEGCMPL 0x0020
331
332 #define PHY_REG_INT_ST_ANEGCMPL 0x0800
333 #define PHY_REG_INT_ST_LINKC 0x0400
334 #define PHY_REG_INT_ST_ENERGY 0x0010
335
336 /***********************************************************************/
337 #define GEM_RX_REJECT (-1)
338 #define GEM_RX_PROMISCUOUS_ACCEPT (-2)
339 #define GEM_RX_BROADCAST_ACCEPT (-3)
340 #define GEM_RX_MULTICAST_HASH_ACCEPT (-4)
341 #define GEM_RX_UNICAST_HASH_ACCEPT (-5)
342
343 #define GEM_RX_SAR_ACCEPT 0
344
345 /***********************************************************************/
346
347 #define DESC_1_USED 0x80000000
348 #define DESC_1_LENGTH 0x00001FFF
349
350 #define DESC_1_TX_WRAP 0x40000000
351 #define DESC_1_TX_LAST 0x00008000
352
353 #define DESC_0_RX_WRAP 0x00000002
354 #define DESC_0_RX_OWNERSHIP 0x00000001
355
356 #define R_DESC_1_RX_SAR_SHIFT 25
357 #define R_DESC_1_RX_SAR_LENGTH 2
358 #define R_DESC_1_RX_SAR_MATCH (1 << 27)
359 #define R_DESC_1_RX_UNICAST_HASH (1 << 29)
360 #define R_DESC_1_RX_MULTICAST_HASH (1 << 30)
361 #define R_DESC_1_RX_BROADCAST (1 << 31)
362
363 #define DESC_1_RX_SOF 0x00004000
364 #define DESC_1_RX_EOF 0x00008000
365
366 #define GEM_MODID_VALUE 0x00020118
367
368 static inline uint64_t tx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc)
369 {
370 uint64_t ret = desc[0];
371
372 if (s->regs[R_DMACFG] & GEM_DMACFG_ADDR_64B) {
373 ret |= (uint64_t)desc[2] << 32;
374 }
375 return ret;
376 }
377
378 static inline unsigned tx_desc_get_used(uint32_t *desc)
379 {
380 return (desc[1] & DESC_1_USED) ? 1 : 0;
381 }
382
383 static inline void tx_desc_set_used(uint32_t *desc)
384 {
385 desc[1] |= DESC_1_USED;
386 }
387
388 static inline unsigned tx_desc_get_wrap(uint32_t *desc)
389 {
390 return (desc[1] & DESC_1_TX_WRAP) ? 1 : 0;
391 }
392
393 static inline unsigned tx_desc_get_last(uint32_t *desc)
394 {
395 return (desc[1] & DESC_1_TX_LAST) ? 1 : 0;
396 }
397
398 static inline unsigned tx_desc_get_length(uint32_t *desc)
399 {
400 return desc[1] & DESC_1_LENGTH;
401 }
402
/*
 * print_gem_tx_desc:
 * Dump the fields of a TX buffer descriptor for debugging.
 * DB_PRINT compiles to a no-op unless CADENCE_GEM_ERR_DEBUG is set.
 */
static inline void print_gem_tx_desc(uint32_t *desc, uint8_t queue)
{
    DB_PRINT("TXDESC (queue %" PRId8 "):\n", queue);
    DB_PRINT("bufaddr: 0x%08x\n", *desc);
    DB_PRINT("used_hw: %d\n", tx_desc_get_used(desc));
    DB_PRINT("wrap:    %d\n", tx_desc_get_wrap(desc));
    DB_PRINT("last:    %d\n", tx_desc_get_last(desc));
    DB_PRINT("length:  %d\n", tx_desc_get_length(desc));
}
412
413 static inline uint64_t rx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc)
414 {
415 uint64_t ret = desc[0] & ~0x3UL;
416
417 if (s->regs[R_DMACFG] & GEM_DMACFG_ADDR_64B) {
418 ret |= (uint64_t)desc[2] << 32;
419 }
420 return ret;
421 }
422
423 static inline int gem_get_desc_len(CadenceGEMState *s, bool rx_n_tx)
424 {
425 int ret = 2;
426
427 if (s->regs[R_DMACFG] & GEM_DMACFG_ADDR_64B) {
428 ret += 2;
429 }
430 if (s->regs[R_DMACFG] & (rx_n_tx ? GEM_DMACFG_RX_BD_EXT
431 : GEM_DMACFG_TX_BD_EXT)) {
432 ret += 2;
433 }
434
435 assert(ret <= DESC_MAX_NUM_WORDS);
436 return ret;
437 }
438
439 static inline unsigned rx_desc_get_wrap(uint32_t *desc)
440 {
441 return desc[0] & DESC_0_RX_WRAP ? 1 : 0;
442 }
443
444 static inline unsigned rx_desc_get_ownership(uint32_t *desc)
445 {
446 return desc[0] & DESC_0_RX_OWNERSHIP ? 1 : 0;
447 }
448
449 static inline void rx_desc_set_ownership(uint32_t *desc)
450 {
451 desc[0] |= DESC_0_RX_OWNERSHIP;
452 }
453
454 static inline void rx_desc_set_sof(uint32_t *desc)
455 {
456 desc[1] |= DESC_1_RX_SOF;
457 }
458
459 static inline void rx_desc_clear_control(uint32_t *desc)
460 {
461 desc[1] = 0;
462 }
463
464 static inline void rx_desc_set_eof(uint32_t *desc)
465 {
466 desc[1] |= DESC_1_RX_EOF;
467 }
468
469 static inline void rx_desc_set_length(uint32_t *desc, unsigned len)
470 {
471 desc[1] &= ~DESC_1_LENGTH;
472 desc[1] |= len;
473 }
474
475 static inline void rx_desc_set_broadcast(uint32_t *desc)
476 {
477 desc[1] |= R_DESC_1_RX_BROADCAST;
478 }
479
480 static inline void rx_desc_set_unicast_hash(uint32_t *desc)
481 {
482 desc[1] |= R_DESC_1_RX_UNICAST_HASH;
483 }
484
485 static inline void rx_desc_set_multicast_hash(uint32_t *desc)
486 {
487 desc[1] |= R_DESC_1_RX_MULTICAST_HASH;
488 }
489
490 static inline void rx_desc_set_sar(uint32_t *desc, int sar_idx)
491 {
492 desc[1] = deposit32(desc[1], R_DESC_1_RX_SAR_SHIFT, R_DESC_1_RX_SAR_LENGTH,
493 sar_idx);
494 desc[1] |= R_DESC_1_RX_SAR_MATCH;
495 }
496
/* The Ethernet broadcast destination MAC address: FF:FF:FF:FF:FF:FF */
static const uint8_t broadcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
499
500 static uint32_t gem_get_max_buf_len(CadenceGEMState *s, bool tx)
501 {
502 uint32_t size;
503 if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, JUMBO_FRAMES)) {
504 size = s->regs[R_JUMBO_MAX_LEN];
505 if (size > s->jumbo_max_len) {
506 size = s->jumbo_max_len;
507 qemu_log_mask(LOG_GUEST_ERROR, "GEM_JUMBO_MAX_LEN reg cannot be"
508 " greater than 0x%" PRIx32 "\n", s->jumbo_max_len);
509 }
510 } else if (tx) {
511 size = 1518;
512 } else {
513 size = FIELD_EX32(s->regs[R_NWCFG],
514 NWCFG, RECV_1536_BYTE_FRAMES) ? 1538 : 1518;
515 }
516 return size;
517 }
518
519 static void gem_set_isr(CadenceGEMState *s, int q, uint32_t flag)
520 {
521 if (q == 0) {
522 s->regs[R_ISR] |= flag & ~(s->regs[R_IMR]);
523 } else {
524 s->regs[R_INT_Q1_STATUS + q - 1] |= flag &
525 ~(s->regs[R_INT_Q1_MASK + q - 1]);
526 }
527 }
528
/*
 * gem_init_register_masks:
 * One time initialization.
 * Set masks to identify which register bits have magical clear properties:
 * read-only, clear-on-read, write-1-to-clear, and write-only. The register
 * access code consults these tables on every guest access.
 */
static void gem_init_register_masks(CadenceGEMState *s)
{
    unsigned int i;
    /* Mask of register bits which are read only */
    memset(&s->regs_ro[0], 0, sizeof(s->regs_ro));
    s->regs_ro[R_NWCTRL] = 0xFFF80000;
    s->regs_ro[R_NWSTATUS] = 0xFFFFFFFF;
    s->regs_ro[R_DMACFG] = 0x8E00F000;
    s->regs_ro[R_TXSTATUS] = 0xFFFFFE08;
    s->regs_ro[R_RXQBASE] = 0x00000003;
    s->regs_ro[R_TXQBASE] = 0x00000003;
    s->regs_ro[R_RXSTATUS] = 0xFFFFFFF0;
    s->regs_ro[R_ISR] = 0xFFFFFFFF;
    s->regs_ro[R_IMR] = 0xFFFFFFFF;
    s->regs_ro[R_MODID] = 0xFFFFFFFF;
    /* Only the implemented priority queues get their masks configured. */
    for (i = 0; i < s->num_priority_queues; i++) {
        s->regs_ro[R_INT_Q1_STATUS + i] = 0xFFFFFFFF;
        s->regs_ro[R_INT_Q1_ENABLE + i] = 0xFFFFF319;
        s->regs_ro[R_INT_Q1_DISABLE + i] = 0xFFFFF319;
        s->regs_ro[R_INT_Q1_MASK + i] = 0xFFFFFFFF;
    }

    /* Mask of register bits which are clear on read */
    memset(&s->regs_rtc[0], 0, sizeof(s->regs_rtc));
    s->regs_rtc[R_ISR] = 0xFFFFFFFF;
    for (i = 0; i < s->num_priority_queues; i++) {
        s->regs_rtc[R_INT_Q1_STATUS + i] = 0x00000CE6;
    }

    /* Mask of register bits which are write 1 to clear */
    memset(&s->regs_w1c[0], 0, sizeof(s->regs_w1c));
    s->regs_w1c[R_TXSTATUS] = 0x000001F7;
    s->regs_w1c[R_RXSTATUS] = 0x0000000F;

    /* Mask of register bits which are write only */
    memset(&s->regs_wo[0], 0, sizeof(s->regs_wo));
    s->regs_wo[R_NWCTRL] = 0x00073E60;
    s->regs_wo[R_IER] = 0x07FFFFFF;
    s->regs_wo[R_IDR] = 0x07FFFFFF;
    for (i = 0; i < s->num_priority_queues; i++) {
        s->regs_wo[R_INT_Q1_ENABLE + i] = 0x00000CE6;
        s->regs_wo[R_INT_Q1_DISABLE + i] = 0x00000CE6;
    }
}
578
579 /*
580 * phy_update_link:
581 * Make the emulated PHY link state match the QEMU "interface" state.
582 */
583 static void phy_update_link(CadenceGEMState *s)
584 {
585 DB_PRINT("down %d\n", qemu_get_queue(s->nic)->link_down);
586
587 /* Autonegotiation status mirrors link status. */
588 if (qemu_get_queue(s->nic)->link_down) {
589 s->phy_regs[PHY_REG_STATUS] &= ~(PHY_REG_STATUS_ANEGCMPL |
590 PHY_REG_STATUS_LINK);
591 s->phy_regs[PHY_REG_INT_ST] |= PHY_REG_INT_ST_LINKC;
592 } else {
593 s->phy_regs[PHY_REG_STATUS] |= (PHY_REG_STATUS_ANEGCMPL |
594 PHY_REG_STATUS_LINK);
595 s->phy_regs[PHY_REG_INT_ST] |= (PHY_REG_INT_ST_LINKC |
596 PHY_REG_INT_ST_ANEGCMPL |
597 PHY_REG_INT_ST_ENERGY);
598 }
599 }
600
601 static bool gem_can_receive(NetClientState *nc)
602 {
603 CadenceGEMState *s;
604 int i;
605
606 s = qemu_get_nic_opaque(nc);
607
608 /* Do nothing if receive is not enabled. */
609 if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_RECEIVE)) {
610 if (s->can_rx_state != 1) {
611 s->can_rx_state = 1;
612 DB_PRINT("can't receive - no enable\n");
613 }
614 return false;
615 }
616
617 for (i = 0; i < s->num_priority_queues; i++) {
618 if (rx_desc_get_ownership(s->rx_desc[i]) != 1) {
619 break;
620 }
621 };
622
623 if (i == s->num_priority_queues) {
624 if (s->can_rx_state != 2) {
625 s->can_rx_state = 2;
626 DB_PRINT("can't receive - all the buffer descriptors are busy\n");
627 }
628 return false;
629 }
630
631 if (s->can_rx_state != 0) {
632 s->can_rx_state = 0;
633 DB_PRINT("can receive\n");
634 }
635 return true;
636 }
637
638 /*
639 * gem_update_int_status:
640 * Raise or lower interrupt based on current status.
641 */
642 static void gem_update_int_status(CadenceGEMState *s)
643 {
644 int i;
645
646 qemu_set_irq(s->irq[0], !!s->regs[R_ISR]);
647
648 for (i = 1; i < s->num_priority_queues; ++i) {
649 qemu_set_irq(s->irq[i], !!s->regs[R_INT_Q1_STATUS + i - 1]);
650 }
651 }
652
/*
 * gem_receive_updatestats:
 * Increment receive statistics for an accepted frame of @bytes octets
 * whose contents start at @packet.
 */
static void gem_receive_updatestats(CadenceGEMState *s, const uint8_t *packet,
                                    unsigned bytes)
{
    uint64_t octets;

    /* Total octets (bytes) received */
    /*
     * NOTE(review): OCTRXLO is used here as the *upper* 32 bits and
     * OCTRXHI as the lower ones, which looks inverted relative to the
     * register names. The read-modify-write below is self-consistent,
     * so the 64-bit count stays correct internally - confirm against
     * the GEM TRM before changing the guest-visible layout.
     */
    octets = ((uint64_t)(s->regs[R_OCTRXLO]) << 32) |
             s->regs[R_OCTRXHI];
    octets += bytes;
    s->regs[R_OCTRXLO] = octets >> 32;
    s->regs[R_OCTRXHI] = octets;

    /* Error-free Frames received */
    s->regs[R_RXCNT]++;

    /* Error-free Broadcast Frames counter */
    if (!memcmp(packet, broadcast_addr, 6)) {
        s->regs[R_RXBROADCNT]++;
    }

    /* Error-free Multicast Frames counter */
    /*
     * NOTE(review): only a first octet of exactly 0x01 is counted, not
     * every address with the group bit set - presumably intentional,
     * verify against hardware behaviour.
     */
    if (packet[0] == 0x01) {
        s->regs[R_RXMULTICNT]++;
    }

    /* Bucket the frame into the per-size statistics counters. */
    if (bytes <= 64) {
        s->regs[R_RX64CNT]++;
    } else if (bytes <= 127) {
        s->regs[R_RX65CNT]++;
    } else if (bytes <= 255) {
        s->regs[R_RX128CNT]++;
    } else if (bytes <= 511) {
        s->regs[R_RX256CNT]++;
    } else if (bytes <= 1023) {
        s->regs[R_RX512CNT]++;
    } else if (bytes <= 1518) {
        s->regs[R_RX1024CNT]++;
    } else {
        s->regs[R_RX1519CNT]++;
    }
}
698
/*
 * Get the MAC Address bit from the specified position
 */
static unsigned get_bit(const uint8_t *mac, unsigned bit)
{
    return (mac[bit / 8] >> (bit & 0x7)) & 1;
}

/*
 * Calculate a GEM MAC Address hash index
 *
 * Each of the six hash-index bits is the XOR parity of every sixth bit
 * of the 48-bit address, starting at the bit position matching the index.
 */
static unsigned calc_mac_hash(const uint8_t *mac)
{
    unsigned hash_index = 0;
    int index_bit;

    for (index_bit = 0; index_bit <= 5; index_bit++) {
        unsigned parity = 0;
        int tap;

        for (tap = 0; tap < 8; tap++) {
            parity ^= get_bit(mac, index_bit + tap * 6);
        }
        hash_index |= parity << index_bit;
    }

    return hash_index;
}
737
738 /*
739 * gem_mac_address_filter:
740 * Accept or reject this destination address?
741 * Returns:
742 * GEM_RX_REJECT: reject
743 * >= 0: Specific address accept (which matched SAR is returned)
744 * others for various other modes of accept:
745 * GEM_RM_PROMISCUOUS_ACCEPT, GEM_RX_BROADCAST_ACCEPT,
746 * GEM_RX_MULTICAST_HASH_ACCEPT or GEM_RX_UNICAST_HASH_ACCEPT
747 */
748 static int gem_mac_address_filter(CadenceGEMState *s, const uint8_t *packet)
749 {
750 uint8_t *gem_spaddr;
751 int i, is_mc;
752
753 /* Promiscuous mode? */
754 if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, PROMISC)) {
755 return GEM_RX_PROMISCUOUS_ACCEPT;
756 }
757
758 if (!memcmp(packet, broadcast_addr, 6)) {
759 /* Reject broadcast packets? */
760 if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, NO_BROADCAST)) {
761 return GEM_RX_REJECT;
762 }
763 return GEM_RX_BROADCAST_ACCEPT;
764 }
765
766 /* Accept packets -w- hash match? */
767 is_mc = is_multicast_ether_addr(packet);
768 if ((is_mc && (FIELD_EX32(s->regs[R_NWCFG], NWCFG, MULTICAST_HASH_EN))) ||
769 (!is_mc && FIELD_EX32(s->regs[R_NWCFG], NWCFG, UNICAST_HASH_EN))) {
770 uint64_t buckets;
771 unsigned hash_index;
772
773 hash_index = calc_mac_hash(packet);
774 buckets = ((uint64_t)s->regs[R_HASHHI] << 32) | s->regs[R_HASHLO];
775 if ((buckets >> hash_index) & 1) {
776 return is_mc ? GEM_RX_MULTICAST_HASH_ACCEPT
777 : GEM_RX_UNICAST_HASH_ACCEPT;
778 }
779 }
780
781 /* Check all 4 specific addresses */
782 gem_spaddr = (uint8_t *)&(s->regs[R_SPADDR1LO]);
783 for (i = 3; i >= 0; i--) {
784 if (s->sar_active[i] && !memcmp(packet, gem_spaddr + 8 * i, 6)) {
785 return GEM_RX_SAR_ACCEPT + i;
786 }
787 }
788
789 /* No address match; reject the packet */
790 return GEM_RX_REJECT;
791 }
792
793 /* Figure out which queue the received data should be sent to */
794 static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr,
795 unsigned rxbufsize)
796 {
797 uint32_t reg;
798 bool matched, mismatched;
799 int i, j;
800
801 for (i = 0; i < s->num_type1_screeners; i++) {
802 reg = s->regs[R_SCREENING_TYPE1_REG0 + i];
803 matched = false;
804 mismatched = false;
805
806 /* Screening is based on UDP Port */
807 if (FIELD_EX32(reg, SCREENING_TYPE1_REG0, UDP_PORT_MATCH_EN)) {
808 uint16_t udp_port = rxbuf_ptr[14 + 22] << 8 | rxbuf_ptr[14 + 23];
809 if (udp_port == FIELD_EX32(reg, SCREENING_TYPE1_REG0, UDP_PORT_MATCH)) {
810 matched = true;
811 } else {
812 mismatched = true;
813 }
814 }
815
816 /* Screening is based on DS/TC */
817 if (FIELD_EX32(reg, SCREENING_TYPE1_REG0, DSTC_ENABLE)) {
818 uint8_t dscp = rxbuf_ptr[14 + 1];
819 if (dscp == FIELD_EX32(reg, SCREENING_TYPE1_REG0, DSTC_MATCH)) {
820 matched = true;
821 } else {
822 mismatched = true;
823 }
824 }
825
826 if (matched && !mismatched) {
827 return FIELD_EX32(reg, SCREENING_TYPE1_REG0, QUEUE_NUM);
828 }
829 }
830
831 for (i = 0; i < s->num_type2_screeners; i++) {
832 reg = s->regs[R_SCREENING_TYPE2_REG0 + i];
833 matched = false;
834 mismatched = false;
835
836 if (FIELD_EX32(reg, SCREENING_TYPE2_REG0, ETHERTYPE_ENABLE)) {
837 uint16_t type = rxbuf_ptr[12] << 8 | rxbuf_ptr[13];
838 int et_idx = FIELD_EX32(reg, SCREENING_TYPE2_REG0,
839 ETHERTYPE_REG_INDEX);
840
841 if (et_idx > s->num_type2_screeners) {
842 qemu_log_mask(LOG_GUEST_ERROR, "Out of range ethertype "
843 "register index: %d\n", et_idx);
844 }
845 if (type == s->regs[R_SCREENING_TYPE2_ETHERTYPE_REG0 +
846 et_idx]) {
847 matched = true;
848 } else {
849 mismatched = true;
850 }
851 }
852
853 /* Compare A, B, C */
854 for (j = 0; j < 3; j++) {
855 uint32_t cr0, cr1, mask, compare;
856 uint16_t rx_cmp;
857 int offset;
858 int cr_idx = extract32(reg, R_SCREENING_TYPE2_REG0_COMPARE_A_SHIFT + j * 6,
859 R_SCREENING_TYPE2_REG0_COMPARE_A_LENGTH);
860
861 if (!extract32(reg, R_SCREENING_TYPE2_REG0_COMPARE_A_ENABLE_SHIFT + j * 6,
862 R_SCREENING_TYPE2_REG0_COMPARE_A_ENABLE_LENGTH)) {
863 continue;
864 }
865
866 if (cr_idx > s->num_type2_screeners) {
867 qemu_log_mask(LOG_GUEST_ERROR, "Out of range compare "
868 "register index: %d\n", cr_idx);
869 }
870
871 cr0 = s->regs[R_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2];
872 cr1 = s->regs[R_TYPE2_COMPARE_0_WORD_1 + cr_idx * 2];
873 offset = FIELD_EX32(cr1, TYPE2_COMPARE_0_WORD_1, OFFSET_VALUE);
874
875 switch (FIELD_EX32(cr1, TYPE2_COMPARE_0_WORD_1, COMPARE_OFFSET)) {
876 case 3: /* Skip UDP header */
877 qemu_log_mask(LOG_UNIMP, "TCP compare offsets"
878 "unimplemented - assuming UDP\n");
879 offset += 8;
880 /* Fallthrough */
881 case 2: /* skip the IP header */
882 offset += 20;
883 /* Fallthrough */
884 case 1: /* Count from after the ethertype */
885 offset += 14;
886 break;
887 case 0:
888 /* Offset from start of frame */
889 break;
890 }
891
892 rx_cmp = rxbuf_ptr[offset] << 8 | rxbuf_ptr[offset];
893 mask = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, MASK_VALUE);
894 compare = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE);
895
896 if ((rx_cmp & mask) == (compare & mask)) {
897 matched = true;
898 } else {
899 mismatched = true;
900 }
901 }
902
903 if (matched && !mismatched) {
904 return FIELD_EX32(reg, SCREENING_TYPE2_REG0, QUEUE_NUM);
905 }
906 }
907
908 /* We made it here, assume it's queue 0 */
909 return 0;
910 }
911
912 static uint32_t gem_get_queue_base_addr(CadenceGEMState *s, bool tx, int q)
913 {
914 uint32_t base_addr = 0;
915
916 switch (q) {
917 case 0:
918 base_addr = s->regs[tx ? R_TXQBASE : R_RXQBASE];
919 break;
920 case 1 ... (MAX_PRIORITY_QUEUES - 1):
921 base_addr = s->regs[(tx ? R_TRANSMIT_Q1_PTR :
922 R_RECEIVE_Q1_PTR) + q - 1];
923 break;
924 default:
925 g_assert_not_reached();
926 };
927
928 return base_addr;
929 }
930
/* Convenience wrapper: TX variant of gem_get_queue_base_addr(). */
static inline uint32_t gem_get_tx_queue_base_addr(CadenceGEMState *s, int q)
{
    return gem_get_queue_base_addr(s, true, q);
}

/* Convenience wrapper: RX variant of gem_get_queue_base_addr(). */
static inline uint32_t gem_get_rx_queue_base_addr(CadenceGEMState *s, int q)
{
    return gem_get_queue_base_addr(s, false, q);
}
940
941 static hwaddr gem_get_desc_addr(CadenceGEMState *s, bool tx, int q)
942 {
943 hwaddr desc_addr = 0;
944
945 if (s->regs[R_DMACFG] & GEM_DMACFG_ADDR_64B) {
946 desc_addr = s->regs[tx ? R_TBQPH : R_RBQPH];
947 }
948 desc_addr <<= 32;
949 desc_addr |= tx ? s->tx_desc_addr[q] : s->rx_desc_addr[q];
950 return desc_addr;
951 }
952
/* Convenience wrapper: TX variant of gem_get_desc_addr(). */
static hwaddr gem_get_tx_desc_addr(CadenceGEMState *s, int q)
{
    return gem_get_desc_addr(s, true, q);
}

/* Convenience wrapper: RX variant of gem_get_desc_addr(). */
static hwaddr gem_get_rx_desc_addr(CadenceGEMState *s, int q)
{
    return gem_get_desc_addr(s, false, q);
}
962
/*
 * gem_get_rx_desc:
 * Fetch the current RX buffer descriptor for queue @q from guest memory
 * into s->rx_desc[q]. If the descriptor is still owned by software,
 * record "buffer not available" in RXSTATUS and raise the RX-used
 * interrupt for that queue.
 */
static void gem_get_rx_desc(CadenceGEMState *s, int q)
{
    hwaddr desc_addr = gem_get_rx_desc_addr(s, q);

    DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", desc_addr);

    /* read current descriptor */
    address_space_read(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                       s->rx_desc[q],
                       sizeof(uint32_t) * gem_get_desc_len(s, true));

    /* Descriptor owned by software ? */
    if (rx_desc_get_ownership(s->rx_desc[q]) == 1) {
        DB_PRINT("descriptor 0x%" HWADDR_PRIx " owned by sw.\n", desc_addr);
        s->regs[R_RXSTATUS] |= GEM_RXSTATUS_NOBUF;
        gem_set_isr(s, q, GEM_INT_RXUSED);
        /* Handle interrupt consequences */
        gem_update_int_status(s);
    }
}
983
/*
 * gem_receive:
 * Fit a packet handed to us by QEMU into the receive descriptor ring.
 *
 * Returns 'size' when the frame was consumed (including the case where
 * it is silently filtered out), or -1 when it is dropped with an error
 * or receive is disabled mid-frame.
 */
static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    CadenceGEMState *s = qemu_get_nic_opaque(nc);
    unsigned rxbufsize, bytes_to_copy;
    unsigned rxbuf_offset;
    uint8_t *rxbuf_ptr;
    bool first_desc = true;
    int maf;
    int q = 0;

    /* Is this destination MAC address "for us" ? */
    maf = gem_mac_address_filter(s, buf);
    if (maf == GEM_RX_REJECT) {
        return size; /* no, drop silently b/c it's not an error */
    }

    /* Discard packets with receive length error enabled ? */
    if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, LEN_ERR_DISCARD)) {
        unsigned type_len;

        /* Fish the ethertype / length field out of the RX packet */
        type_len = buf[12] << 8 | buf[13];
        /* It is a length field, not an ethertype */
        if (type_len < 0x600) {
            if (size < type_len) {
                /* discard */
                return -1;
            }
        }
    }

    /*
     * Determine configured receive buffer offset (probably 0)
     */
    rxbuf_offset = FIELD_EX32(s->regs[R_NWCFG], NWCFG, RECV_BUF_OFFSET);

    /* The configure size of each receive buffer.  Determines how many
     * buffers needed to hold this packet.
     */
    rxbufsize = ((s->regs[R_DMACFG] & GEM_DMACFG_RBUFSZ_M) >>
                 GEM_DMACFG_RBUFSZ_S) * GEM_DMACFG_RBUFSZ_MUL;
    bytes_to_copy = size;

    /* Hardware allows a zero value here but warns against it. To avoid QEMU
     * indefinite loops we enforce a minimum value here
     */
    if (rxbufsize < GEM_DMACFG_RBUFSZ_MUL) {
        rxbufsize = GEM_DMACFG_RBUFSZ_MUL;
    }

    /* Pad to minimum length. Assume FCS field is stripped, logic
     * below will increment it to the real minimum of 64 when
     * not FCS stripping
     */
    if (size < 60) {
        size = 60;
    }

    /* Strip of FCS field ? (usually yes) */
    if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, FCS_REMOVE)) {
        /* FCS stripping enabled: hand the frame over as-is, QEMU never
         * provides an FCS in the first place. */
        rxbuf_ptr = (void *)buf;
    } else {
        unsigned crc_val;

        if (size > MAX_FRAME_SIZE - sizeof(crc_val)) {
            size = MAX_FRAME_SIZE - sizeof(crc_val);
        }
        bytes_to_copy = size;
        /* The application wants the FCS field, which QEMU does not provide.
         * We must try and calculate one.
         */

        memcpy(s->rx_packet, buf, size);
        memset(s->rx_packet + size, 0, MAX_FRAME_SIZE - size);
        rxbuf_ptr = s->rx_packet;
        crc_val = cpu_to_le32(crc32(0, s->rx_packet, MAX(size, 60)));
        memcpy(s->rx_packet + size, &crc_val, sizeof(crc_val));

        /* Account for the appended 4-byte FCS in both counters. */
        bytes_to_copy += 4;
        size += 4;
    }

    /* NOTE(review): 'size' is size_t, so %zu would be the matching
     * specifier here rather than %zd; debug-only path. */
    DB_PRINT("config bufsize: %u packet size: %zd\n", rxbufsize, size);

    /* Find which queue we are targeting */
    q = get_queue_from_screen(s, rxbuf_ptr, rxbufsize);

    if (size > gem_get_max_buf_len(s, false)) {
        qemu_log_mask(LOG_GUEST_ERROR, "rx frame too long\n");
        gem_set_isr(s, q, GEM_INT_AMBA_ERR);
        return -1;
    }

    /* Scatter the frame across as many RX buffers as needed. */
    while (bytes_to_copy) {
        hwaddr desc_addr;

        /* Do nothing if receive is not enabled. */
        if (!gem_can_receive(nc)) {
            return -1;
        }

        DB_PRINT("copy %" PRIu32 " bytes to 0x%" PRIx64 "\n",
                 MIN(bytes_to_copy, rxbufsize),
                 rx_desc_get_buffer(s, s->rx_desc[q]));

        /* Copy packet data to emulated DMA buffer */
        address_space_write(&s->dma_as, rx_desc_get_buffer(s, s->rx_desc[q]) +
                                                                 rxbuf_offset,
                            MEMTXATTRS_UNSPECIFIED, rxbuf_ptr,
                            MIN(bytes_to_copy, rxbufsize));
        rxbuf_ptr += MIN(bytes_to_copy, rxbufsize);
        bytes_to_copy -= MIN(bytes_to_copy, rxbufsize);

        rx_desc_clear_control(s->rx_desc[q]);

        /* Update the descriptor.  */
        if (first_desc) {
            rx_desc_set_sof(s->rx_desc[q]);
            first_desc = false;
        }
        if (bytes_to_copy == 0) {
            /* Last buffer of the frame: mark EOF and record total length. */
            rx_desc_set_eof(s->rx_desc[q]);
            rx_desc_set_length(s->rx_desc[q], size);
        }
        rx_desc_set_ownership(s->rx_desc[q]);

        /* Record in the descriptor why the MAC filter accepted the frame. */
        switch (maf) {
        case GEM_RX_PROMISCUOUS_ACCEPT:
            break;
        case GEM_RX_BROADCAST_ACCEPT:
            rx_desc_set_broadcast(s->rx_desc[q]);
            break;
        case GEM_RX_UNICAST_HASH_ACCEPT:
            rx_desc_set_unicast_hash(s->rx_desc[q]);
            break;
        case GEM_RX_MULTICAST_HASH_ACCEPT:
            rx_desc_set_multicast_hash(s->rx_desc[q]);
            break;
        case GEM_RX_REJECT:
            abort();
        default: /* SAR */
            rx_desc_set_sar(s->rx_desc[q], maf);
        }

        /* Descriptor write-back.  */
        desc_addr = gem_get_rx_desc_addr(s, q);
        address_space_write(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                            s->rx_desc[q],
                            sizeof(uint32_t) * gem_get_desc_len(s, true));

        /* Next descriptor */
        if (rx_desc_get_wrap(s->rx_desc[q])) {
            DB_PRINT("wrapping RX descriptor list\n");
            s->rx_desc_addr[q] = gem_get_rx_queue_base_addr(s, q);
        } else {
            DB_PRINT("incrementing RX descriptor list\n");
            s->rx_desc_addr[q] += 4 * gem_get_desc_len(s, true);
        }

        gem_get_rx_desc(s, q);
    }

    /* Count it */
    gem_receive_updatestats(s, buf, size);

    s->regs[R_RXSTATUS] |= GEM_RXSTATUS_FRMRCVD;
    gem_set_isr(s, q, GEM_INT_RXCMPL);

    /* Handle interrupt consequences */
    gem_update_int_status(s);

    return size;
}
1161
1162 /*
1163 * gem_transmit_updatestats:
1164 * Increment transmit statistics.
1165 */
1166 static void gem_transmit_updatestats(CadenceGEMState *s, const uint8_t *packet,
1167 unsigned bytes)
1168 {
1169 uint64_t octets;
1170
1171 /* Total octets (bytes) transmitted */
1172 octets = ((uint64_t)(s->regs[R_OCTTXLO]) << 32) |
1173 s->regs[R_OCTTXHI];
1174 octets += bytes;
1175 s->regs[R_OCTTXLO] = octets >> 32;
1176 s->regs[R_OCTTXHI] = octets;
1177
1178 /* Error-free Frames transmitted */
1179 s->regs[R_TXCNT]++;
1180
1181 /* Error-free Broadcast Frames counter */
1182 if (!memcmp(packet, broadcast_addr, 6)) {
1183 s->regs[R_TXBCNT]++;
1184 }
1185
1186 /* Error-free Multicast Frames counter */
1187 if (packet[0] == 0x01) {
1188 s->regs[R_TXMCNT]++;
1189 }
1190
1191 if (bytes <= 64) {
1192 s->regs[R_TX64CNT]++;
1193 } else if (bytes <= 127) {
1194 s->regs[R_TX65CNT]++;
1195 } else if (bytes <= 255) {
1196 s->regs[R_TX128CNT]++;
1197 } else if (bytes <= 511) {
1198 s->regs[R_TX256CNT]++;
1199 } else if (bytes <= 1023) {
1200 s->regs[R_TX512CNT]++;
1201 } else if (bytes <= 1518) {
1202 s->regs[R_TX1024CNT]++;
1203 } else {
1204 s->regs[R_TX1519CNT]++;
1205 }
1206 }
1207
/*
 * gem_transmit:
 * Fish packets out of the descriptor ring and feed them to QEMU
 *
 * Walks every priority queue (highest first); within a queue, gathers
 * scatter/gather fragments into s->tx_packet until a descriptor with
 * the LAST bit is seen, then hands the assembled frame to the network
 * layer (or loops it back when PHY/MAC loopback is active).
 */
static void gem_transmit(CadenceGEMState *s)
{
    uint32_t desc[DESC_MAX_NUM_WORDS];
    hwaddr packet_desc_addr;
    uint8_t *p;
    unsigned total_bytes;
    int q = 0;

    /* Do nothing if transmit is not enabled. */
    if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_TRANSMIT)) {
        return;
    }

    DB_PRINT("\n");

    /* The packet we will hand off to QEMU.
     * Packets scattered across multiple descriptors are gathered to this
     * one contiguous buffer first.
     */
    p = s->tx_packet;
    total_bytes = 0;

    for (q = s->num_priority_queues - 1; q >= 0; q--) {
        /* read current descriptor */
        packet_desc_addr = gem_get_tx_desc_addr(s, q);

        DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
        address_space_read(&s->dma_as, packet_desc_addr,
                           MEMTXATTRS_UNSPECIFIED, desc,
                           sizeof(uint32_t) * gem_get_desc_len(s, false));
        /* Handle all descriptors owned by hardware */
        while (tx_desc_get_used(desc) == 0) {

            /* Re-check on every iteration: the guest may disable TX from
             * an interrupt handler triggered below. */
            if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_TRANSMIT)) {
                return;
            }
            print_gem_tx_desc(desc, q);

            /* The real hardware would eat this (and possibly crash).
             * For QEMU let's lend a helping hand.
             */
            if ((tx_desc_get_buffer(s, desc) == 0) ||
                (tx_desc_get_length(desc) == 0)) {
                DB_PRINT("Invalid TX descriptor @ 0x%" HWADDR_PRIx "\n",
                         packet_desc_addr);
                break;
            }

            /* Guard against fragments that would overflow tx_packet. */
            if (tx_desc_get_length(desc) > gem_get_max_buf_len(s, true) -
                                               (p - s->tx_packet)) {
                qemu_log_mask(LOG_GUEST_ERROR, "TX descriptor @ 0x%" \
                         HWADDR_PRIx " too large: size 0x%x space 0x%zx\n",
                         packet_desc_addr, tx_desc_get_length(desc),
                         gem_get_max_buf_len(s, true) - (p - s->tx_packet));
                gem_set_isr(s, q, GEM_INT_AMBA_ERR);
                break;
            }

            /* Gather this fragment of the packet from "dma memory" to our
             * contig buffer.
             */
            address_space_read(&s->dma_as, tx_desc_get_buffer(s, desc),
                               MEMTXATTRS_UNSPECIFIED,
                               p, tx_desc_get_length(desc));
            p += tx_desc_get_length(desc);
            total_bytes += tx_desc_get_length(desc);

            /* Last descriptor for this packet; hand the whole thing off */
            if (tx_desc_get_last(desc)) {
                uint32_t desc_first[DESC_MAX_NUM_WORDS];
                hwaddr desc_addr = gem_get_tx_desc_addr(s, q);

                /* Modify the 1st descriptor of this packet to be owned by
                 * the processor.
                 */
                address_space_read(&s->dma_as, desc_addr,
                                   MEMTXATTRS_UNSPECIFIED, desc_first,
                                   sizeof(desc_first));
                tx_desc_set_used(desc_first);
                address_space_write(&s->dma_as, desc_addr,
                                    MEMTXATTRS_UNSPECIFIED, desc_first,
                                    sizeof(desc_first));
                /* Advance the hardware current descriptor past this packet */
                if (tx_desc_get_wrap(desc)) {
                    s->tx_desc_addr[q] = gem_get_tx_queue_base_addr(s, q);
                } else {
                    s->tx_desc_addr[q] = packet_desc_addr +
                                         4 * gem_get_desc_len(s, false);
                }
                DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]);

                s->regs[R_TXSTATUS] |= GEM_TXSTATUS_TXCMPL;
                gem_set_isr(s, q, GEM_INT_TXCMPL);

                /* Handle interrupt consequences */
                gem_update_int_status(s);

                /* Is checksum offload enabled? */
                if (s->regs[R_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) {
                    net_checksum_calculate(s->tx_packet, total_bytes, CSUM_ALL);
                }

                /* Update MAC statistics */
                gem_transmit_updatestats(s, s->tx_packet, total_bytes);

                /* Send the packet somewhere */
                if (s->phy_loop || FIELD_EX32(s->regs[R_NWCTRL], NWCTRL,
                                              LOOPBACK_LOCAL)) {
                    qemu_receive_packet(qemu_get_queue(s->nic), s->tx_packet,
                                        total_bytes);
                } else {
                    qemu_send_packet(qemu_get_queue(s->nic), s->tx_packet,
                                     total_bytes);
                }

                /* Prepare for next packet */
                p = s->tx_packet;
                total_bytes = 0;
            }

            /* read next descriptor */
            if (tx_desc_get_wrap(desc)) {
                /* Wrap back to the ring base (64-bit aware). */
                if (s->regs[R_DMACFG] & GEM_DMACFG_ADDR_64B) {
                    packet_desc_addr = s->regs[R_TBQPH];
                    packet_desc_addr <<= 32;
                } else {
                    packet_desc_addr = 0;
                }
                packet_desc_addr |= gem_get_tx_queue_base_addr(s, q);
            } else {
                packet_desc_addr += 4 * gem_get_desc_len(s, false);
            }
            DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
            address_space_read(&s->dma_as, packet_desc_addr,
                               MEMTXATTRS_UNSPECIFIED, desc,
                               sizeof(uint32_t) * gem_get_desc_len(s, false));
        }

        /* Ring exhausted: report the "used bit read" condition. */
        if (tx_desc_get_used(desc)) {
            s->regs[R_TXSTATUS] |= GEM_TXSTATUS_USED;
            /* IRQ TXUSED is defined only for queue 0 */
            if (q == 0) {
                gem_set_isr(s, 0, GEM_INT_TXUSED);
            }
            gem_update_int_status(s);
        }
    }
}
1361
/*
 * Reset the emulated PHY to its power-on register values and refresh
 * the link state.  The ID registers (0x0141/0x0CC2) and defaults look
 * like a Marvell-style gigabit PHY -- presumably 88E1111-like; verify
 * against the PHY datasheet if exact modelling matters.
 */
static void gem_phy_reset(CadenceGEMState *s)
{
    memset(&s->phy_regs[0], 0, sizeof(s->phy_regs));
    s->phy_regs[PHY_REG_CONTROL] = 0x1140;
    s->phy_regs[PHY_REG_STATUS] = 0x7969;
    s->phy_regs[PHY_REG_PHYID1] = 0x0141;
    s->phy_regs[PHY_REG_PHYID2] = 0x0CC2;
    s->phy_regs[PHY_REG_ANEGADV] = 0x01E1;
    s->phy_regs[PHY_REG_LINKPABIL] = 0xCDE1;
    s->phy_regs[PHY_REG_ANEGEXP] = 0x000F;
    s->phy_regs[PHY_REG_NEXTP] = 0x2001;
    s->phy_regs[PHY_REG_LINKPNEXTP] = 0x40E6;
    s->phy_regs[PHY_REG_100BTCTRL] = 0x0300;
    s->phy_regs[PHY_REG_1000BTSTAT] = 0x7C00;
    s->phy_regs[PHY_REG_EXTSTAT] = 0x3000;
    s->phy_regs[PHY_REG_PHYSPCFC_CTL] = 0x0078;
    s->phy_regs[PHY_REG_PHYSPCFC_ST] = 0x7C00;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_CTL] = 0x0C60;
    s->phy_regs[PHY_REG_LED] = 0x4100;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_CTL2] = 0x000A;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_ST] = 0x848B;

    /* Fold the current QEMU link status into the status registers. */
    phy_update_link(s);
}
1386
/*
 * Device reset: restore all MAC registers, the descriptor state, the
 * special-address-match slots and the PHY to their post-reset values.
 */
static void gem_reset(DeviceState *d)
{
    int i;
    CadenceGEMState *s = CADENCE_GEM(d);
    const uint8_t *a;
    uint32_t queues_mask = 0;

    DB_PRINT("\n");

    /* Set post reset register values */
    memset(&s->regs[0], 0, sizeof(s->regs));
    s->regs[R_NWCFG] = 0x00080000;
    s->regs[R_NWSTATUS] = 0x00000006;
    s->regs[R_DMACFG] = 0x00020784;
    s->regs[R_IMR] = 0x07ffffff;     /* all interrupts masked at reset */
    s->regs[R_TXPAUSE] = 0x0000ffff;
    s->regs[R_TXPARTIALSF] = 0x000003ff;
    s->regs[R_RXPARTIALSF] = 0x000003ff;
    s->regs[R_MODID] = s->revision;  /* module ID from the property */
    s->regs[R_DESCONF] = 0x02D00111;
    s->regs[R_DESCONF2] = 0x2ab10000 | s->jumbo_max_len;
    s->regs[R_DESCONF5] = 0x002f2045;
    s->regs[R_DESCONF6] = GEM_DESCONF6_64B_MASK;
    s->regs[R_INT_Q1_MASK] = 0x00000CE6;
    s->regs[R_JUMBO_MAX_LEN] = s->jumbo_max_len;

    /* Advertise the extra priority queues in DESCONF6 (bit per queue). */
    if (s->num_priority_queues > 1) {
        queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1);
        s->regs[R_DESCONF6] |= queues_mask;
    }

    /* Set MAC address */
    a = &s->conf.macaddr.a[0];
    s->regs[R_SPADDR1LO] = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24);
    s->regs[R_SPADDR1HI] = a[4] | (a[5] << 8);

    /* Specific-address match slots become active only once written. */
    for (i = 0; i < 4; i++) {
        s->sar_active[i] = false;
    }

    gem_phy_reset(s);

    gem_update_int_status(s);
}
1431
1432 static uint16_t gem_phy_read(CadenceGEMState *s, unsigned reg_num)
1433 {
1434 DB_PRINT("reg: %d value: 0x%04x\n", reg_num, s->phy_regs[reg_num]);
1435 return s->phy_regs[reg_num];
1436 }
1437
/*
 * Write one 16-bit register of the emulated PHY.
 *
 * Writes to the control register have side effects: a self-clearing
 * reset bit (note gem_phy_reset() rewrites the whole phy_regs array
 * before 'val' is finally stored below), instantly-completed
 * autonegotiation, and a loopback toggle mirrored in s->phy_loop.
 */
static void gem_phy_write(CadenceGEMState *s, unsigned reg_num, uint16_t val)
{
    DB_PRINT("reg: %d value: 0x%04x\n", reg_num, val);

    switch (reg_num) {
    case PHY_REG_CONTROL:
        if (val & PHY_REG_CONTROL_RST) {
            /* Phy reset */
            gem_phy_reset(s);
            /* RST and LOOP are cleared by reset; they must not persist. */
            val &= ~(PHY_REG_CONTROL_RST | PHY_REG_CONTROL_LOOP);
            s->phy_loop = 0;
        }
        if (val & PHY_REG_CONTROL_ANEG) {
            /* Complete autonegotiation immediately */
            val &= ~(PHY_REG_CONTROL_ANEG | PHY_REG_CONTROL_ANRESTART);
            s->phy_regs[PHY_REG_STATUS] |= PHY_REG_STATUS_ANEGCMPL;
        }
        if (val & PHY_REG_CONTROL_LOOP) {
            DB_PRINT("PHY placed in loopback\n");
            s->phy_loop = 1;
        } else {
            s->phy_loop = 0;
        }
        break;
    }
    /* Store the (possibly adjusted) value last, after side effects. */
    s->phy_regs[reg_num] = val;
}
1465
/*
 * gem_read:
 * Read a GEM register (MMIO read handler).
 *
 * Besides returning the register value this applies read side effects:
 * PHY-maintenance reads forward to the emulated PHY, read-to-clear bits
 * are squashed in the backing store, and write-only bits are masked out
 * of the returned value.
 */
static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
{
    CadenceGEMState *s;
    uint32_t retval;
    s = opaque;

    /* Registers are 32-bit; convert byte offset to register index. */
    offset >>= 2;
    retval = s->regs[offset];

    DB_PRINT("offset: 0x%04x read: 0x%08x\n", (unsigned)offset*4, retval);

    switch (offset) {
    case R_ISR:
        DB_PRINT("lowering irqs on ISR read\n");
        /* The interrupts get updated at the end of the function. */
        break;
    case R_PHYMNTNC:
        if (retval & GEM_PHYMNTNC_OP_R) {
            uint32_t phy_addr, reg_num;

            phy_addr = (retval & GEM_PHYMNTNC_ADDR) >> GEM_PHYMNTNC_ADDR_SHFT;
            if (phy_addr == s->phy_addr) {
                reg_num = (retval & GEM_PHYMNTNC_REG) >> GEM_PHYMNTNC_REG_SHIFT;
                retval &= 0xFFFF0000;
                retval |= gem_phy_read(s, reg_num);
            } else {
                retval |= 0xFFFF; /* No device at this address */
            }
        }
        break;
    }

    /* Squash read to clear bits */
    s->regs[offset] &= ~(s->regs_rtc[offset]);

    /* Do not provide write only bits */
    retval &= ~(s->regs_wo[offset]);

    DB_PRINT("0x%08x\n", retval);
    /* ISR may have been cleared above; recompute the IRQ lines. */
    gem_update_int_status(s);
    return retval;
}
1512
/*
 * gem_write:
 * Write a GEM register (MMIO write handler).
 *
 * Applies the per-register read-only / write-1-to-clear masks, stores
 * the value, then dispatches the write side effects (TX kick, RX
 * descriptor reload, interrupt enable/disable, PHY maintenance, ...).
 */
static void gem_write(void *opaque, hwaddr offset, uint64_t val,
        unsigned size)
{
    CadenceGEMState *s = (CadenceGEMState *)opaque;
    uint32_t readonly;
    int i;

    DB_PRINT("offset: 0x%04x write: 0x%08x ", (unsigned)offset, (unsigned)val);
    offset >>= 2;

    /* Squash bits which are read only in write value */
    val &= ~(s->regs_ro[offset]);
    /* Preserve (only) bits which are read only and wtc in register */
    readonly = s->regs[offset] & (s->regs_ro[offset] | s->regs_w1c[offset]);

    /* Copy register write to backing store */
    s->regs[offset] = (val & ~s->regs_w1c[offset]) | readonly;

    /* do w1c */
    s->regs[offset] &= ~(s->regs_w1c[offset] & val);

    /* Handle register write side effects */
    switch (offset) {
    case R_NWCTRL:
        if (FIELD_EX32(val, NWCTRL, ENABLE_RECEIVE)) {
            /* RX enabled: (re)load the current descriptor of each queue. */
            for (i = 0; i < s->num_priority_queues; ++i) {
                gem_get_rx_desc(s, i);
            }
        }
        if (FIELD_EX32(val, NWCTRL, TRANSMIT_START)) {
            gem_transmit(s);
        }
        if (!(FIELD_EX32(val, NWCTRL, ENABLE_TRANSMIT))) {
            /* Reset to start of Q when transmit disabled. */
            for (i = 0; i < s->num_priority_queues; i++) {
                s->tx_desc_addr[i] = gem_get_tx_queue_base_addr(s, i);
            }
        }
        if (gem_can_receive(qemu_get_queue(s->nic))) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;

    case R_TXSTATUS:
        /* Status bits were cleared via w1c above; recompute IRQ lines. */
        gem_update_int_status(s);
        break;
    case R_RXQBASE:
        s->rx_desc_addr[0] = val;
        break;
    case R_RECEIVE_Q1_PTR ... R_RECEIVE_Q7_PTR:
        s->rx_desc_addr[offset - R_RECEIVE_Q1_PTR + 1] = val;
        break;
    case R_TXQBASE:
        s->tx_desc_addr[0] = val;
        break;
    case R_TRANSMIT_Q1_PTR ... R_TRANSMIT_Q7_PTR:
        s->tx_desc_addr[offset - R_TRANSMIT_Q1_PTR + 1] = val;
        break;
    case R_RXSTATUS:
        gem_update_int_status(s);
        break;
    case R_IER:
        /* Interrupt Enable: writing 1 clears the mask bit (enables). */
        s->regs[R_IMR] &= ~val;
        gem_update_int_status(s);
        break;
    case R_JUMBO_MAX_LEN:
        s->regs[R_JUMBO_MAX_LEN] = val & MAX_JUMBO_FRAME_SIZE_MASK;
        break;
    case R_INT_Q1_ENABLE ... R_INT_Q7_ENABLE:
        s->regs[R_INT_Q1_MASK + offset - R_INT_Q1_ENABLE] &= ~val;
        gem_update_int_status(s);
        break;
    case R_IDR:
        /* Interrupt Disable: writing 1 sets the mask bit (disables). */
        s->regs[R_IMR] |= val;
        gem_update_int_status(s);
        break;
    case R_INT_Q1_DISABLE ... R_INT_Q7_DISABLE:
        s->regs[R_INT_Q1_MASK + offset - R_INT_Q1_DISABLE] |= val;
        gem_update_int_status(s);
        break;
    case R_SPADDR1LO:
    case R_SPADDR2LO:
    case R_SPADDR3LO:
    case R_SPADDR4LO:
        /* Writing the LO half deactivates the slot until HI is written. */
        s->sar_active[(offset - R_SPADDR1LO) / 2] = false;
        break;
    case R_SPADDR1HI:
    case R_SPADDR2HI:
    case R_SPADDR3HI:
    case R_SPADDR4HI:
        s->sar_active[(offset - R_SPADDR1HI) / 2] = true;
        break;
    case R_PHYMNTNC:
        if (val & GEM_PHYMNTNC_OP_W) {
            uint32_t phy_addr, reg_num;

            phy_addr = (val & GEM_PHYMNTNC_ADDR) >> GEM_PHYMNTNC_ADDR_SHFT;
            if (phy_addr == s->phy_addr) {
                reg_num = (val & GEM_PHYMNTNC_REG) >> GEM_PHYMNTNC_REG_SHIFT;
                gem_phy_write(s, reg_num, val);
            }
        }
        break;
    }

    DB_PRINT("newval: 0x%08x\n", s->regs[offset]);
}
1624
/* MMIO dispatch table for the GEM register bank. */
static const MemoryRegionOps gem_ops = {
    .read = gem_read,
    .write = gem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
1630
1631 static void gem_set_link(NetClientState *nc)
1632 {
1633 CadenceGEMState *s = qemu_get_nic_opaque(nc);
1634
1635 DB_PRINT("\n");
1636 phy_update_link(s);
1637 gem_update_int_status(s);
1638 }
1639
/* Network backend callbacks for the GEM NIC. */
static NetClientInfo net_gem_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = gem_can_receive,
    .receive = gem_receive,
    .link_status_changed = gem_set_link,
};
1647
1648 static void gem_realize(DeviceState *dev, Error **errp)
1649 {
1650 CadenceGEMState *s = CADENCE_GEM(dev);
1651 int i;
1652
1653 address_space_init(&s->dma_as,
1654 s->dma_mr ? s->dma_mr : get_system_memory(), "dma");
1655
1656 if (s->num_priority_queues == 0 ||
1657 s->num_priority_queues > MAX_PRIORITY_QUEUES) {
1658 error_setg(errp, "Invalid num-priority-queues value: %" PRIx8,
1659 s->num_priority_queues);
1660 return;
1661 } else if (s->num_type1_screeners > MAX_TYPE1_SCREENERS) {
1662 error_setg(errp, "Invalid num-type1-screeners value: %" PRIx8,
1663 s->num_type1_screeners);
1664 return;
1665 } else if (s->num_type2_screeners > MAX_TYPE2_SCREENERS) {
1666 error_setg(errp, "Invalid num-type2-screeners value: %" PRIx8,
1667 s->num_type2_screeners);
1668 return;
1669 }
1670
1671 for (i = 0; i < s->num_priority_queues; ++i) {
1672 sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
1673 }
1674
1675 qemu_macaddr_default_if_unset(&s->conf.macaddr);
1676
1677 s->nic = qemu_new_nic(&net_gem_info, &s->conf,
1678 object_get_typename(OBJECT(dev)), dev->id, s);
1679
1680 if (s->jumbo_max_len > MAX_FRAME_SIZE) {
1681 error_setg(errp, "jumbo-max-len is greater than %d",
1682 MAX_FRAME_SIZE);
1683 return;
1684 }
1685 }
1686
/* Instance init: set up the register masks and the MMIO region. */
static void gem_init(Object *obj)
{
    CadenceGEMState *s = CADENCE_GEM(obj);
    DeviceState *dev = DEVICE(obj);

    DB_PRINT("\n");

    /* Precompute per-register ro/wo/rtc/w1c masks used by gem_read/write. */
    gem_init_register_masks(s);
    memory_region_init_io(&s->iomem, OBJECT(s), &gem_ops, s,
                          "enet", sizeof(s->regs));

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
}
1700
/* Migration state: registers, PHY state and per-queue descriptor pointers. */
static const VMStateDescription vmstate_cadence_gem = {
    .name = "cadence_gem",
    .version_id = 4,
    .minimum_version_id = 4,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, CadenceGEMState, CADENCE_GEM_MAXREG),
        VMSTATE_UINT16_ARRAY(phy_regs, CadenceGEMState, 32),
        VMSTATE_UINT8(phy_loop, CadenceGEMState),
        VMSTATE_UINT32_ARRAY(rx_desc_addr, CadenceGEMState,
                             MAX_PRIORITY_QUEUES),
        VMSTATE_UINT32_ARRAY(tx_desc_addr, CadenceGEMState,
                             MAX_PRIORITY_QUEUES),
        VMSTATE_BOOL_ARRAY(sar_active, CadenceGEMState, 4),
        VMSTATE_END_OF_LIST(),
    }
};
1717
/* User-configurable device properties (validated in gem_realize). */
static Property gem_properties[] = {
    DEFINE_NIC_PROPERTIES(CadenceGEMState, conf),
    DEFINE_PROP_UINT32("revision", CadenceGEMState, revision,
                       GEM_MODID_VALUE),
    DEFINE_PROP_UINT8("phy-addr", CadenceGEMState, phy_addr, BOARD_PHY_ADDRESS),
    DEFINE_PROP_UINT8("num-priority-queues", CadenceGEMState,
                      num_priority_queues, 1),
    DEFINE_PROP_UINT8("num-type1-screeners", CadenceGEMState,
                      num_type1_screeners, 4),
    DEFINE_PROP_UINT8("num-type2-screeners", CadenceGEMState,
                      num_type2_screeners, 4),
    DEFINE_PROP_UINT16("jumbo-max-len", CadenceGEMState,
                       jumbo_max_len, 10240),
    DEFINE_PROP_LINK("dma", CadenceGEMState, dma_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};
1735
/* Class init: wire up realize/reset/migration/properties. */
static void gem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = gem_realize;
    device_class_set_props(dc, gem_properties);
    dc->vmsd = &vmstate_cadence_gem;
    dc->reset = gem_reset;
}
1745
/* QOM type registration record for the Cadence GEM sysbus device. */
static const TypeInfo gem_info = {
    .name  = TYPE_CADENCE_GEM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size  = sizeof(CadenceGEMState),
    .instance_init = gem_init,
    .class_init = gem_class_init,
};
1753
/* Register the GEM type with QOM (invoked via type_init below). */
static void gem_register_types(void)
{
    type_register_static(&gem_info);
}
1758
1759 type_init(gem_register_types)