]>
Commit | Line | Data |
---|---|---|
eb1e7c3e FC |
1 | /* |
2 | * QEMU Freescale eTSEC Emulator | |
3 | * | |
4 | * Copyright (c) 2011-2013 AdaCore | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
7 | * of this software and associated documentation files (the "Software"), to deal | |
8 | * in the Software without restriction, including without limitation the rights | |
9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
10 | * copies of the Software, and to permit persons to whom the Software is | |
11 | * furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
22 | * THE SOFTWARE. | |
23 | */ | |
e8d40465 | 24 | #include "qemu/osdep.h" |
eb1e7c3e | 25 | #include "net/checksum.h" |
03dd024f | 26 | #include "qemu/log.h" |
eb1e7c3e FC |
27 | #include "etsec.h" |
28 | #include "registers.h" | |
29 | ||
30 | /* #define ETSEC_RING_DEBUG */ | |
31 | /* #define HEX_DUMP */ | |
32 | /* #define DEBUG_BD */ | |
33 | ||
#ifdef ETSEC_RING_DEBUG
static const int debug_etsec = 1;
#else
static const int debug_etsec;
#endif

/* Ring-walk trace helper: when ETSEC_RING_DEBUG is not defined above,
 * debug_etsec is 0 and the qemu_log() call is dead code the compiler
 * can drop, so the macro costs nothing in normal builds. */
#define RING_DEBUG(fmt, ...) do { \
    if (debug_etsec) { \
        qemu_log(fmt , ## __VA_ARGS__); \
    } \
    } while (0)
45 | ||
46 | #ifdef DEBUG_BD | |
47 | ||
48 | static void print_tx_bd_flags(uint16_t flags) | |
49 | { | |
50 | qemu_log(" Ready: %d\n", !!(flags & BD_TX_READY)); | |
51 | qemu_log(" PAD/CRC: %d\n", !!(flags & BD_TX_PADCRC)); | |
52 | qemu_log(" Wrap: %d\n", !!(flags & BD_WRAP)); | |
53 | qemu_log(" Interrupt: %d\n", !!(flags & BD_INTERRUPT)); | |
54 | qemu_log(" Last in frame: %d\n", !!(flags & BD_LAST)); | |
55 | qemu_log(" Tx CRC: %d\n", !!(flags & BD_TX_TC)); | |
56 | qemu_log(" User-defined preamble / defer: %d\n", | |
57 | !!(flags & BD_TX_PREDEF)); | |
58 | qemu_log(" Huge frame enable / Late collision: %d\n", | |
59 | !!(flags & BD_TX_HFELC)); | |
60 | qemu_log(" Control frame / Retransmission Limit: %d\n", | |
61 | !!(flags & BD_TX_CFRL)); | |
62 | qemu_log(" Retry count: %d\n", | |
63 | (flags >> BD_TX_RC_OFFSET) & BD_TX_RC_MASK); | |
64 | qemu_log(" Underrun / TCP/IP off-load enable: %d\n", | |
65 | !!(flags & BD_TX_TOEUN)); | |
66 | qemu_log(" Truncation: %d\n", !!(flags & BD_TX_TR)); | |
67 | } | |
68 | ||
69 | static void print_rx_bd_flags(uint16_t flags) | |
70 | { | |
71 | qemu_log(" Empty: %d\n", !!(flags & BD_RX_EMPTY)); | |
72 | qemu_log(" Receive software ownership: %d\n", !!(flags & BD_RX_RO1)); | |
73 | qemu_log(" Wrap: %d\n", !!(flags & BD_WRAP)); | |
74 | qemu_log(" Interrupt: %d\n", !!(flags & BD_INTERRUPT)); | |
75 | qemu_log(" Last in frame: %d\n", !!(flags & BD_LAST)); | |
76 | qemu_log(" First in frame: %d\n", !!(flags & BD_RX_FIRST)); | |
77 | qemu_log(" Miss: %d\n", !!(flags & BD_RX_MISS)); | |
78 | qemu_log(" Broadcast: %d\n", !!(flags & BD_RX_BROADCAST)); | |
79 | qemu_log(" Multicast: %d\n", !!(flags & BD_RX_MULTICAST)); | |
80 | qemu_log(" Rx frame length violation: %d\n", !!(flags & BD_RX_LG)); | |
81 | qemu_log(" Rx non-octet aligned frame: %d\n", !!(flags & BD_RX_NO)); | |
82 | qemu_log(" Short frame: %d\n", !!(flags & BD_RX_SH)); | |
83 | qemu_log(" Rx CRC Error: %d\n", !!(flags & BD_RX_CR)); | |
84 | qemu_log(" Overrun: %d\n", !!(flags & BD_RX_OV)); | |
85 | qemu_log(" Truncation: %d\n", !!(flags & BD_RX_TR)); | |
86 | } | |
87 | ||
88 | ||
89 | static void print_bd(eTSEC_rxtx_bd bd, int mode, uint32_t index) | |
90 | { | |
91 | qemu_log("eTSEC %s Data Buffer Descriptor (%u)\n", | |
92 | mode == eTSEC_TRANSMIT ? "Transmit" : "Receive", | |
93 | index); | |
94 | qemu_log(" Flags : 0x%04x\n", bd.flags); | |
95 | if (mode == eTSEC_TRANSMIT) { | |
96 | print_tx_bd_flags(bd.flags); | |
97 | } else { | |
98 | print_rx_bd_flags(bd.flags); | |
99 | } | |
100 | qemu_log(" Length : 0x%04x\n", bd.length); | |
101 | qemu_log(" Pointer : 0x%08x\n", bd.bufptr); | |
102 | } | |
103 | ||
104 | #endif /* DEBUG_BD */ | |
105 | ||
106 | static void read_buffer_descriptor(eTSEC *etsec, | |
107 | hwaddr addr, | |
108 | eTSEC_rxtx_bd *bd) | |
109 | { | |
110 | assert(bd != NULL); | |
111 | ||
112 | RING_DEBUG("READ Buffer Descriptor @ 0x" TARGET_FMT_plx"\n", addr); | |
113 | cpu_physical_memory_read(addr, | |
114 | bd, | |
115 | sizeof(eTSEC_rxtx_bd)); | |
116 | ||
117 | if (etsec->regs[DMACTRL].value & DMACTRL_LE) { | |
118 | bd->flags = lduw_le_p(&bd->flags); | |
119 | bd->length = lduw_le_p(&bd->length); | |
120 | bd->bufptr = ldl_le_p(&bd->bufptr); | |
121 | } else { | |
122 | bd->flags = lduw_be_p(&bd->flags); | |
123 | bd->length = lduw_be_p(&bd->length); | |
124 | bd->bufptr = ldl_be_p(&bd->bufptr); | |
125 | } | |
126 | } | |
127 | ||
128 | static void write_buffer_descriptor(eTSEC *etsec, | |
129 | hwaddr addr, | |
130 | eTSEC_rxtx_bd *bd) | |
131 | { | |
132 | assert(bd != NULL); | |
133 | ||
134 | if (etsec->regs[DMACTRL].value & DMACTRL_LE) { | |
135 | stw_le_p(&bd->flags, bd->flags); | |
136 | stw_le_p(&bd->length, bd->length); | |
137 | stl_le_p(&bd->bufptr, bd->bufptr); | |
138 | } else { | |
139 | stw_be_p(&bd->flags, bd->flags); | |
140 | stw_be_p(&bd->length, bd->length); | |
141 | stl_be_p(&bd->bufptr, bd->bufptr); | |
142 | } | |
143 | ||
144 | RING_DEBUG("Write Buffer Descriptor @ 0x" TARGET_FMT_plx"\n", addr); | |
145 | cpu_physical_memory_write(addr, | |
146 | bd, | |
147 | sizeof(eTSEC_rxtx_bd)); | |
148 | } | |
149 | ||
150 | static void ievent_set(eTSEC *etsec, | |
151 | uint32_t flags) | |
152 | { | |
153 | etsec->regs[IEVENT].value |= flags; | |
154 | ||
155 | if ((flags & IEVENT_TXB && etsec->regs[IMASK].value & IMASK_TXBEN) | |
156 | || (flags & IEVENT_TXF && etsec->regs[IMASK].value & IMASK_TXFEN)) { | |
157 | qemu_irq_raise(etsec->tx_irq); | |
158 | RING_DEBUG("%s Raise Tx IRQ\n", __func__); | |
159 | } | |
160 | ||
161 | if ((flags & IEVENT_RXB && etsec->regs[IMASK].value & IMASK_RXBEN) | |
162 | || (flags & IEVENT_RXF && etsec->regs[IMASK].value & IMASK_RXFEN)) { | |
d5843485 | 163 | qemu_irq_raise(etsec->rx_irq); |
eb1e7c3e FC |
164 | RING_DEBUG("%s Raise Rx IRQ\n", __func__); |
165 | } | |
166 | } | |
167 | ||
168 | static void tx_padding_and_crc(eTSEC *etsec, uint32_t min_frame_len) | |
169 | { | |
170 | int add = min_frame_len - etsec->tx_buffer_len; | |
171 | ||
172 | /* Padding */ | |
173 | if (add > 0) { | |
174 | RING_DEBUG("pad:%u\n", add); | |
175 | etsec->tx_buffer = g_realloc(etsec->tx_buffer, | |
176 | etsec->tx_buffer_len + add); | |
177 | ||
178 | memset(etsec->tx_buffer + etsec->tx_buffer_len, 0x0, add); | |
179 | etsec->tx_buffer_len += add; | |
180 | } | |
181 | ||
182 | /* Never add CRC in QEMU */ | |
183 | } | |
184 | ||
185 | static void process_tx_fcb(eTSEC *etsec) | |
186 | { | |
187 | uint8_t flags = (uint8_t)(*etsec->tx_buffer); | |
188 | /* L3 header offset from start of frame */ | |
189 | uint8_t l3_header_offset = (uint8_t)*(etsec->tx_buffer + 3); | |
190 | /* L4 header offset from start of L3 header */ | |
191 | uint8_t l4_header_offset = (uint8_t)*(etsec->tx_buffer + 2); | |
192 | /* L3 header */ | |
193 | uint8_t *l3_header = etsec->tx_buffer + 8 + l3_header_offset; | |
194 | /* L4 header */ | |
195 | uint8_t *l4_header = l3_header + l4_header_offset; | |
196 | ||
197 | /* if packet is IP4 and IP checksum is requested */ | |
198 | if (flags & FCB_TX_IP && flags & FCB_TX_CIP) { | |
3b163b01 SW |
199 | /* do IP4 checksum (TODO This function does TCP/UDP checksum |
200 | * but not sure if it also does IP4 checksum.) */ | |
eb1e7c3e FC |
201 | net_checksum_calculate(etsec->tx_buffer + 8, |
202 | etsec->tx_buffer_len - 8); | |
203 | } | |
204 | /* TODO Check the correct usage of the PHCS field of the FCB in case the NPH | |
205 | * flag is on */ | |
206 | ||
207 | /* if packet is IP4 and TCP or UDP */ | |
208 | if (flags & FCB_TX_IP && flags & FCB_TX_TUP) { | |
209 | /* if UDP */ | |
210 | if (flags & FCB_TX_UDP) { | |
211 | /* if checksum is requested */ | |
212 | if (flags & FCB_TX_CTU) { | |
213 | /* do UDP checksum */ | |
214 | ||
215 | net_checksum_calculate(etsec->tx_buffer + 8, | |
216 | etsec->tx_buffer_len - 8); | |
217 | } else { | |
218 | /* set checksum field to 0 */ | |
219 | l4_header[6] = 0; | |
220 | l4_header[7] = 0; | |
221 | } | |
222 | } else if (flags & FCB_TX_CTU) { /* if TCP and checksum is requested */ | |
223 | /* do TCP checksum */ | |
224 | net_checksum_calculate(etsec->tx_buffer + 8, | |
225 | etsec->tx_buffer_len - 8); | |
226 | } | |
227 | } | |
228 | } | |
229 | ||
230 | static void process_tx_bd(eTSEC *etsec, | |
231 | eTSEC_rxtx_bd *bd) | |
232 | { | |
233 | uint8_t *tmp_buff = NULL; | |
234 | hwaddr tbdbth = (hwaddr)(etsec->regs[TBDBPH].value & 0xF) << 32; | |
235 | ||
236 | if (bd->length == 0) { | |
237 | /* ERROR */ | |
238 | return; | |
239 | } | |
240 | ||
241 | if (etsec->tx_buffer_len == 0) { | |
242 | /* It's the first BD */ | |
243 | etsec->first_bd = *bd; | |
244 | } | |
245 | ||
246 | /* TODO: if TxBD[TOE/UN] skip the Tx Frame Control Block*/ | |
247 | ||
248 | /* Load this Data Buffer */ | |
249 | etsec->tx_buffer = g_realloc(etsec->tx_buffer, | |
250 | etsec->tx_buffer_len + bd->length); | |
251 | tmp_buff = etsec->tx_buffer + etsec->tx_buffer_len; | |
252 | cpu_physical_memory_read(bd->bufptr + tbdbth, tmp_buff, bd->length); | |
253 | ||
254 | /* Update buffer length */ | |
255 | etsec->tx_buffer_len += bd->length; | |
256 | ||
257 | ||
258 | if (etsec->tx_buffer_len != 0 && (bd->flags & BD_LAST)) { | |
259 | if (etsec->regs[MACCFG1].value & MACCFG1_TX_EN) { | |
260 | /* MAC Transmit enabled */ | |
261 | ||
262 | /* Process offload Tx FCB */ | |
263 | if (etsec->first_bd.flags & BD_TX_TOEUN) { | |
264 | process_tx_fcb(etsec); | |
265 | } | |
266 | ||
267 | if (etsec->first_bd.flags & BD_TX_PADCRC | |
268 | || etsec->regs[MACCFG2].value & MACCFG2_PADCRC) { | |
269 | ||
270 | /* Padding and CRC (Padding implies CRC) */ | |
271 | tx_padding_and_crc(etsec, 64); | |
272 | ||
273 | } else if (etsec->first_bd.flags & BD_TX_TC | |
274 | || etsec->regs[MACCFG2].value & MACCFG2_CRC_EN) { | |
275 | ||
276 | /* Only CRC */ | |
277 | /* Never add CRC in QEMU */ | |
278 | } | |
279 | ||
280 | #if defined(HEX_DUMP) | |
281 | qemu_log("eTSEC Send packet size:%d\n", etsec->tx_buffer_len); | |
282 | qemu_hexdump(etsec->tx_buffer, stderr, "", etsec->tx_buffer_len); | |
283 | #endif /* ETSEC_RING_DEBUG */ | |
284 | ||
285 | if (etsec->first_bd.flags & BD_TX_TOEUN) { | |
286 | qemu_send_packet(qemu_get_queue(etsec->nic), | |
287 | etsec->tx_buffer + 8, | |
288 | etsec->tx_buffer_len - 8); | |
289 | } else { | |
290 | qemu_send_packet(qemu_get_queue(etsec->nic), | |
291 | etsec->tx_buffer, | |
292 | etsec->tx_buffer_len); | |
293 | } | |
294 | ||
295 | } | |
296 | ||
297 | etsec->tx_buffer_len = 0; | |
298 | ||
299 | if (bd->flags & BD_INTERRUPT) { | |
300 | ievent_set(etsec, IEVENT_TXF); | |
301 | } | |
302 | } else { | |
303 | if (bd->flags & BD_INTERRUPT) { | |
304 | ievent_set(etsec, IEVENT_TXB); | |
305 | } | |
306 | } | |
307 | ||
308 | /* Update DB flags */ | |
309 | ||
310 | /* Clear Ready */ | |
311 | bd->flags &= ~BD_TX_READY; | |
312 | ||
313 | /* Clear Defer */ | |
314 | bd->flags &= ~BD_TX_PREDEF; | |
315 | ||
316 | /* Clear Late Collision */ | |
317 | bd->flags &= ~BD_TX_HFELC; | |
318 | ||
319 | /* Clear Retransmission Limit */ | |
320 | bd->flags &= ~BD_TX_CFRL; | |
321 | ||
322 | /* Clear Retry Count */ | |
323 | bd->flags &= ~(BD_TX_RC_MASK << BD_TX_RC_OFFSET); | |
324 | ||
325 | /* Clear Underrun */ | |
326 | bd->flags &= ~BD_TX_TOEUN; | |
327 | ||
328 | /* Clear Truncation */ | |
329 | bd->flags &= ~BD_TX_TR; | |
330 | } | |
331 | ||
332 | void etsec_walk_tx_ring(eTSEC *etsec, int ring_nbr) | |
333 | { | |
334 | hwaddr ring_base = 0; | |
335 | hwaddr bd_addr = 0; | |
336 | eTSEC_rxtx_bd bd; | |
337 | uint16_t bd_flags; | |
338 | ||
339 | if (!(etsec->regs[MACCFG1].value & MACCFG1_TX_EN)) { | |
340 | RING_DEBUG("%s: MAC Transmit not enabled\n", __func__); | |
341 | return; | |
342 | } | |
343 | ||
344 | ring_base = (hwaddr)(etsec->regs[TBASEH].value & 0xF) << 32; | |
345 | ring_base += etsec->regs[TBASE0 + ring_nbr].value & ~0x7; | |
346 | bd_addr = etsec->regs[TBPTR0 + ring_nbr].value & ~0x7; | |
347 | ||
348 | do { | |
349 | read_buffer_descriptor(etsec, bd_addr, &bd); | |
350 | ||
351 | #ifdef DEBUG_BD | |
352 | print_bd(bd, | |
353 | eTSEC_TRANSMIT, | |
354 | (bd_addr - ring_base) / sizeof(eTSEC_rxtx_bd)); | |
355 | ||
356 | #endif /* DEBUG_BD */ | |
357 | ||
358 | /* Save flags before BD update */ | |
359 | bd_flags = bd.flags; | |
360 | ||
361 | if (bd_flags & BD_TX_READY) { | |
362 | process_tx_bd(etsec, &bd); | |
363 | ||
364 | /* Write back BD after update */ | |
365 | write_buffer_descriptor(etsec, bd_addr, &bd); | |
366 | } | |
367 | ||
368 | /* Wrap or next BD */ | |
369 | if (bd_flags & BD_WRAP) { | |
370 | bd_addr = ring_base; | |
371 | } else { | |
372 | bd_addr += sizeof(eTSEC_rxtx_bd); | |
373 | } | |
374 | ||
375 | } while (bd_addr != ring_base); | |
376 | ||
377 | bd_addr = ring_base; | |
378 | ||
379 | /* Save the Buffer Descriptor Pointers to current bd */ | |
380 | etsec->regs[TBPTR0 + ring_nbr].value = bd_addr; | |
381 | ||
382 | /* Set transmit halt THLTx */ | |
383 | etsec->regs[TSTAT].value |= 1 << (31 - ring_nbr); | |
384 | } | |
385 | ||
386 | static void fill_rx_bd(eTSEC *etsec, | |
387 | eTSEC_rxtx_bd *bd, | |
388 | const uint8_t **buf, | |
389 | size_t *size) | |
390 | { | |
391 | uint16_t to_write; | |
392 | hwaddr bufptr = bd->bufptr + | |
393 | ((hwaddr)(etsec->regs[TBDBPH].value & 0xF) << 32); | |
394 | uint8_t padd[etsec->rx_padding]; | |
395 | uint8_t rem; | |
396 | ||
397 | RING_DEBUG("eTSEC fill Rx buffer @ 0x%016" HWADDR_PRIx | |
398 | " size:%zu(padding + crc:%u) + fcb:%u\n", | |
399 | bufptr, *size, etsec->rx_padding, etsec->rx_fcb_size); | |
400 | ||
401 | bd->length = 0; | |
402 | ||
403 | /* This operation will only write FCB */ | |
404 | if (etsec->rx_fcb_size != 0) { | |
405 | ||
406 | cpu_physical_memory_write(bufptr, etsec->rx_fcb, etsec->rx_fcb_size); | |
407 | ||
408 | bufptr += etsec->rx_fcb_size; | |
409 | bd->length += etsec->rx_fcb_size; | |
410 | etsec->rx_fcb_size = 0; | |
411 | ||
412 | } | |
413 | ||
414 | /* We remove padding from the computation of to_write because it is not | |
415 | * allocated in the buffer. | |
416 | */ | |
417 | to_write = MIN(*size - etsec->rx_padding, | |
418 | etsec->regs[MRBLR].value - etsec->rx_fcb_size); | |
419 | ||
420 | /* This operation can only write packet data and no padding */ | |
421 | if (to_write > 0) { | |
422 | cpu_physical_memory_write(bufptr, *buf, to_write); | |
423 | ||
424 | *buf += to_write; | |
425 | bufptr += to_write; | |
426 | *size -= to_write; | |
427 | ||
428 | bd->flags &= ~BD_RX_EMPTY; | |
429 | bd->length += to_write; | |
430 | } | |
431 | ||
432 | if (*size == etsec->rx_padding) { | |
433 | /* The remaining bytes are only for padding which is not actually | |
434 | * allocated in the data buffer. | |
435 | */ | |
436 | ||
437 | rem = MIN(etsec->regs[MRBLR].value - bd->length, etsec->rx_padding); | |
438 | ||
439 | if (rem > 0) { | |
440 | memset(padd, 0x0, sizeof(padd)); | |
441 | etsec->rx_padding -= rem; | |
442 | *size -= rem; | |
443 | bd->length += rem; | |
444 | cpu_physical_memory_write(bufptr, padd, rem); | |
445 | } | |
446 | } | |
447 | } | |
448 | ||
449 | static void rx_init_frame(eTSEC *etsec, const uint8_t *buf, size_t size) | |
450 | { | |
451 | uint32_t fcb_size = 0; | |
452 | uint8_t prsdep = (etsec->regs[RCTRL].value >> RCTRL_PRSDEP_OFFSET) | |
453 | & RCTRL_PRSDEP_MASK; | |
454 | ||
455 | if (prsdep != 0) { | |
456 | /* Prepend FCB (FCB size + RCTRL[PAL]) */ | |
457 | fcb_size = 8 + ((etsec->regs[RCTRL].value >> 16) & 0x1F); | |
458 | ||
459 | etsec->rx_fcb_size = fcb_size; | |
460 | ||
461 | /* TODO: fill_FCB(etsec); */ | |
462 | memset(etsec->rx_fcb, 0x0, sizeof(etsec->rx_fcb)); | |
463 | ||
464 | } else { | |
465 | etsec->rx_fcb_size = 0; | |
466 | } | |
467 | ||
ef1e1e07 | 468 | g_free(etsec->rx_buffer); |
eb1e7c3e FC |
469 | |
470 | /* Do not copy the frame for now */ | |
471 | etsec->rx_buffer = (uint8_t *)buf; | |
472 | etsec->rx_buffer_len = size; | |
473 | ||
474 | /* CRC padding (We don't have to compute the CRC) */ | |
475 | etsec->rx_padding = 4; | |
476 | ||
64f441d2 AS |
477 | /* |
478 | * Ensure that payload length + CRC length is at least 802.3 | |
479 | * minimum MTU size bytes long (64) | |
480 | */ | |
481 | if (etsec->rx_buffer_len < 60) { | |
482 | etsec->rx_padding += 60 - etsec->rx_buffer_len; | |
483 | } | |
484 | ||
eb1e7c3e FC |
485 | etsec->rx_first_in_frame = 1; |
486 | etsec->rx_remaining_data = etsec->rx_buffer_len; | |
487 | RING_DEBUG("%s: rx_buffer_len:%u rx_padding+crc:%u\n", __func__, | |
488 | etsec->rx_buffer_len, etsec->rx_padding); | |
489 | } | |
490 | ||
b6cb6610 | 491 | ssize_t etsec_rx_ring_write(eTSEC *etsec, const uint8_t *buf, size_t size) |
eb1e7c3e FC |
492 | { |
493 | int ring_nbr = 0; /* Always use ring0 (no filer) */ | |
494 | ||
495 | if (etsec->rx_buffer_len != 0) { | |
496 | RING_DEBUG("%s: We can't receive now," | |
497 | " a buffer is already in the pipe\n", __func__); | |
b6cb6610 | 498 | return 0; |
eb1e7c3e FC |
499 | } |
500 | ||
501 | if (etsec->regs[RSTAT].value & 1 << (23 - ring_nbr)) { | |
502 | RING_DEBUG("%s: The ring is halted\n", __func__); | |
b6cb6610 | 503 | return -1; |
eb1e7c3e FC |
504 | } |
505 | ||
506 | if (etsec->regs[DMACTRL].value & DMACTRL_GRS) { | |
507 | RING_DEBUG("%s: Graceful receive stop\n", __func__); | |
b6cb6610 | 508 | return -1; |
eb1e7c3e FC |
509 | } |
510 | ||
511 | if (!(etsec->regs[MACCFG1].value & MACCFG1_RX_EN)) { | |
512 | RING_DEBUG("%s: MAC Receive not enabled\n", __func__); | |
b6cb6610 | 513 | return -1; |
eb1e7c3e FC |
514 | } |
515 | ||
516 | if ((etsec->regs[RCTRL].value & RCTRL_RSF) && (size < 60)) { | |
517 | /* CRC is not in the packet yet, so short frame is below 60 bytes */ | |
518 | RING_DEBUG("%s: Drop short frame\n", __func__); | |
b6cb6610 | 519 | return -1; |
eb1e7c3e FC |
520 | } |
521 | ||
522 | rx_init_frame(etsec, buf, size); | |
523 | ||
524 | etsec_walk_rx_ring(etsec, ring_nbr); | |
b6cb6610 FZ |
525 | |
526 | return size; | |
eb1e7c3e FC |
527 | } |
528 | ||
529 | void etsec_walk_rx_ring(eTSEC *etsec, int ring_nbr) | |
530 | { | |
531 | hwaddr ring_base = 0; | |
532 | hwaddr bd_addr = 0; | |
533 | hwaddr start_bd_addr = 0; | |
534 | eTSEC_rxtx_bd bd; | |
535 | uint16_t bd_flags; | |
536 | size_t remaining_data; | |
537 | const uint8_t *buf; | |
538 | uint8_t *tmp_buf; | |
539 | size_t size; | |
540 | ||
541 | if (etsec->rx_buffer_len == 0) { | |
542 | /* No frame to send */ | |
543 | RING_DEBUG("No frame to send\n"); | |
544 | return; | |
545 | } | |
546 | ||
547 | remaining_data = etsec->rx_remaining_data + etsec->rx_padding; | |
548 | buf = etsec->rx_buffer | |
549 | + (etsec->rx_buffer_len - etsec->rx_remaining_data); | |
550 | size = etsec->rx_buffer_len + etsec->rx_padding; | |
551 | ||
552 | ring_base = (hwaddr)(etsec->regs[RBASEH].value & 0xF) << 32; | |
553 | ring_base += etsec->regs[RBASE0 + ring_nbr].value & ~0x7; | |
554 | start_bd_addr = bd_addr = etsec->regs[RBPTR0 + ring_nbr].value & ~0x7; | |
555 | ||
556 | do { | |
557 | read_buffer_descriptor(etsec, bd_addr, &bd); | |
558 | ||
559 | #ifdef DEBUG_BD | |
560 | print_bd(bd, | |
561 | eTSEC_RECEIVE, | |
562 | (bd_addr - ring_base) / sizeof(eTSEC_rxtx_bd)); | |
563 | ||
564 | #endif /* DEBUG_BD */ | |
565 | ||
566 | /* Save flags before BD update */ | |
567 | bd_flags = bd.flags; | |
568 | ||
569 | if (bd_flags & BD_RX_EMPTY) { | |
570 | fill_rx_bd(etsec, &bd, &buf, &remaining_data); | |
571 | ||
572 | if (etsec->rx_first_in_frame) { | |
573 | bd.flags |= BD_RX_FIRST; | |
574 | etsec->rx_first_in_frame = 0; | |
575 | etsec->rx_first_bd = bd; | |
576 | } | |
577 | ||
578 | /* Last in frame */ | |
579 | if (remaining_data == 0) { | |
580 | ||
581 | /* Clear flags */ | |
582 | ||
583 | bd.flags &= ~0x7ff; | |
584 | ||
585 | bd.flags |= BD_LAST; | |
586 | ||
587 | /* NOTE: non-octet aligned frame is impossible in qemu */ | |
588 | ||
589 | if (size >= etsec->regs[MAXFRM].value) { | |
590 | /* frame length violation */ | |
591 | qemu_log("%s frame length violation: size:%zu MAXFRM:%d\n", | |
592 | __func__, size, etsec->regs[MAXFRM].value); | |
593 | ||
594 | bd.flags |= BD_RX_LG; | |
595 | } | |
596 | ||
597 | if (size < 64) { | |
598 | /* Short frame */ | |
599 | bd.flags |= BD_RX_SH; | |
600 | } | |
601 | ||
602 | /* TODO: Broadcast and Multicast */ | |
603 | ||
9c749e4d | 604 | if (bd.flags & BD_INTERRUPT) { |
eb1e7c3e FC |
605 | /* Set RXFx */ |
606 | etsec->regs[RSTAT].value |= 1 << (7 - ring_nbr); | |
607 | ||
608 | /* Set IEVENT */ | |
609 | ievent_set(etsec, IEVENT_RXF); | |
610 | } | |
611 | ||
612 | } else { | |
9c749e4d | 613 | if (bd.flags & BD_INTERRUPT) { |
eb1e7c3e FC |
614 | /* Set IEVENT */ |
615 | ievent_set(etsec, IEVENT_RXB); | |
616 | } | |
617 | } | |
618 | ||
619 | /* Write back BD after update */ | |
620 | write_buffer_descriptor(etsec, bd_addr, &bd); | |
621 | } | |
622 | ||
623 | /* Wrap or next BD */ | |
624 | if (bd_flags & BD_WRAP) { | |
625 | bd_addr = ring_base; | |
626 | } else { | |
627 | bd_addr += sizeof(eTSEC_rxtx_bd); | |
628 | } | |
629 | } while (remaining_data != 0 | |
630 | && (bd_flags & BD_RX_EMPTY) | |
631 | && bd_addr != start_bd_addr); | |
632 | ||
633 | /* Reset ring ptr */ | |
634 | etsec->regs[RBPTR0 + ring_nbr].value = bd_addr; | |
635 | ||
636 | /* The frame is too large to fit in the Rx ring */ | |
637 | if (remaining_data > 0) { | |
638 | ||
639 | /* Set RSTAT[QHLTx] */ | |
640 | etsec->regs[RSTAT].value |= 1 << (23 - ring_nbr); | |
641 | ||
642 | /* Save remaining data to send the end of the frame when the ring will | |
643 | * be restarted | |
644 | */ | |
645 | etsec->rx_remaining_data = remaining_data; | |
646 | ||
647 | /* Copy the frame */ | |
648 | tmp_buf = g_malloc(size); | |
649 | memcpy(tmp_buf, etsec->rx_buffer, size); | |
650 | etsec->rx_buffer = tmp_buf; | |
651 | ||
652 | RING_DEBUG("no empty RxBD available any more\n"); | |
653 | } else { | |
654 | etsec->rx_buffer_len = 0; | |
655 | etsec->rx_buffer = NULL; | |
575bafd1 FZ |
656 | if (etsec->need_flush) { |
657 | qemu_flush_queued_packets(qemu_get_queue(etsec->nic)); | |
658 | } | |
eb1e7c3e FC |
659 | } |
660 | ||
661 | RING_DEBUG("eTSEC End of ring_write: remaining_data:%zu\n", remaining_data); | |
662 | } |