/*
 * ColdFire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This code is licensed under the GPL
 */
#include "hw.h"
#include "net.h"
#include "mcf.h"
/* For crc32 */
#include <zlib.h>
#include "exec-memory.h"

//#define DEBUG_FEC 1

#ifdef DEBUG_FEC
#define DPRINTF(fmt, ...) \
do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

#define FEC_MAX_FRAME_SIZE 2032

typedef struct {
    MemoryRegion *sysmem;
    MemoryRegion iomem;
    qemu_irq *irq;
    NICState *nic;
    NICConf conf;
    uint32_t irq_state;
    uint32_t eir;
    uint32_t eimr;
    int rx_enabled;
    uint32_t rx_descriptor;
    uint32_t tx_descriptor;
    uint32_t ecr;
    uint32_t mmfr;
    uint32_t mscr;
    uint32_t rcr;
    uint32_t tcr;
    uint32_t tfwr;
    uint32_t rfsr;
    uint32_t erdsr;
    uint32_t etdsr;
    uint32_t emrbr;
} mcf_fec_state;

#define FEC_INT_HB   0x80000000
#define FEC_INT_BABR 0x40000000
#define FEC_INT_BABT 0x20000000
#define FEC_INT_GRA  0x10000000
#define FEC_INT_TXF  0x08000000
#define FEC_INT_TXB  0x04000000
#define FEC_INT_RXF  0x02000000
#define FEC_INT_RXB  0x01000000
#define FEC_INT_MII  0x00800000
#define FEC_INT_EB   0x00400000
#define FEC_INT_LC   0x00200000
#define FEC_INT_RL   0x00100000
#define FEC_INT_UN   0x00080000

#define FEC_EN    2
#define FEC_RESET 1

/* Map interrupt flags onto IRQ lines. */
#define FEC_NUM_IRQ 13
static const uint32_t mcf_fec_irq_map[FEC_NUM_IRQ] = {
    FEC_INT_TXF,
    FEC_INT_TXB,
    FEC_INT_UN,
    FEC_INT_RL,
    FEC_INT_RXF,
    FEC_INT_RXB,
    FEC_INT_MII,
    FEC_INT_LC,
    FEC_INT_HB,
    FEC_INT_GRA,
    FEC_INT_EB,
    FEC_INT_BABT,
    FEC_INT_BABR
};

/* Buffer Descriptor. */
typedef struct {
    uint16_t flags;
    uint16_t length;
    uint32_t data;
} mcf_fec_bd;

#define FEC_BD_R   0x8000
#define FEC_BD_E   0x8000
#define FEC_BD_O1  0x4000
#define FEC_BD_W   0x2000
#define FEC_BD_O2  0x1000
#define FEC_BD_L   0x0800
#define FEC_BD_TC  0x0400
#define FEC_BD_ABC 0x0200
#define FEC_BD_M   0x0100
#define FEC_BD_BC  0x0080
#define FEC_BD_MC  0x0040
#define FEC_BD_LG  0x0020
#define FEC_BD_NO  0x0010
#define FEC_BD_CR  0x0004
#define FEC_BD_OV  0x0002
#define FEC_BD_TR  0x0001

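/*
 * Buffer descriptors live in guest memory as 8-byte records: a 16-bit
 * flags word, a 16-bit length and a 32-bit buffer pointer, all stored
 * big-endian.  These helpers convert between that layout and the host
 * representation in mcf_fec_bd.
 */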
static void mcf_fec_read_bd(mcf_fec_bd *bd, uint32_t addr)
{
    cpu_physical_memory_read(addr, (uint8_t *)bd, sizeof(*bd));
    be16_to_cpus(&bd->flags);
    be16_to_cpus(&bd->length);
    be32_to_cpus(&bd->data);
}

static void mcf_fec_write_bd(mcf_fec_bd *bd, uint32_t addr)
{
    mcf_fec_bd tmp;
    tmp.flags = cpu_to_be16(bd->flags);
    tmp.length = cpu_to_be16(bd->length);
    tmp.data = cpu_to_be32(bd->data);
    cpu_physical_memory_write(addr, (uint8_t *)&tmp, sizeof(tmp));
}

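/*
 * Recompute the externally visible interrupt state.  Each event that is
 * both pending (EIR) and enabled (EIMR) drives its own IRQ line; the
 * entry's index in mcf_fec_irq_map selects which of the FEC_NUM_IRQ lines
 * in s->irq is raised.  Only lines whose state actually changed are
 * updated.
 */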
static void mcf_fec_update(mcf_fec_state *s)
{
    uint32_t active;
    uint32_t changed;
    uint32_t mask;
    int i;

    active = s->eir & s->eimr;
    changed = active ^ s->irq_state;
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        mask = mcf_fec_irq_map[i];
        if (changed & mask) {
            DPRINTF("IRQ %d = %d\n", i, (active & mask) != 0);
            qemu_set_irq(s->irq[i], (active & mask) != 0);
        }
    }
    s->irq_state = active;
}

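/*
 * Transmit path: walk the TX descriptor ring starting at s->tx_descriptor,
 * gathering buffers into a single frame until a descriptor with the L
 * (last-in-frame) bit is reached, then hand the frame to the network
 * layer.  Each consumed descriptor has its R (ready) bit cleared and is
 * written back; the W (wrap) bit sends the ring back to ETDSR.
 */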
static void mcf_fec_do_tx(mcf_fec_state *s)
{
    uint32_t addr;
    mcf_fec_bd bd;
    int frame_size;
    int len;
    uint8_t frame[FEC_MAX_FRAME_SIZE];
    uint8_t *ptr;

    DPRINTF("do_tx\n");
    ptr = frame;
    frame_size = 0;
    addr = s->tx_descriptor;
    while (1) {
        mcf_fec_read_bd(&bd, addr);
        DPRINTF("tx_bd %x flags %04x len %d data %08x\n",
                addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & FEC_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            break;
        }
        len = bd.length;
        if (frame_size + len > FEC_MAX_FRAME_SIZE) {
            len = FEC_MAX_FRAME_SIZE - frame_size;
            s->eir |= FEC_INT_BABT;
        }
        cpu_physical_memory_read(bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & FEC_BD_L) {
            /* Last buffer in frame: the accumulated frame_size bytes
               form the complete packet. */
            DPRINTF("Sending packet\n");
            qemu_send_packet(&s->nic->nc, frame, frame_size);
            ptr = frame;
            frame_size = 0;
            s->eir |= FEC_INT_TXF;
        }
        s->eir |= FEC_INT_TXB;
        bd.flags &= ~FEC_BD_R;
        /* Write back the modified descriptor. */
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->etdsr;
        } else {
            addr += 8;
        }
    }
    s->tx_descriptor = addr;
}

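/*
 * Reception stays enabled only while the descriptor at s->rx_descriptor
 * is empty (E bit set); otherwise incoming packets are refused until the
 * guest frees a buffer and writes RDAR again.
 */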
static void mcf_fec_enable_rx(mcf_fec_state *s)
{
    mcf_fec_bd bd;

    mcf_fec_read_bd(&bd, s->rx_descriptor);
    s->rx_enabled = ((bd.flags & FEC_BD_E) != 0);
    if (!s->rx_enabled)
        DPRINTF("RX buffer full\n");
}

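/*
 * Reset the programmable register state (triggered by the RESET bit in
 * ECR).  RCR comes back with MAX_FL (bits 16-26) set to 0x5ee, i.e. a
 * 1518 byte maximum frame length, which is the limit enforced in
 * mcf_fec_receive.
 */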
static void mcf_fec_reset(mcf_fec_state *s)
{
    s->eir = 0;
    s->eimr = 0;
    s->rx_enabled = 0;
    s->ecr = 0;
    s->mscr = 0;
    s->rcr = 0x05ee0001;
    s->tcr = 0;
    s->tfwr = 0;
    s->rfsr = 0x500;
}

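/*
 * Register reads.  Offsets are relative to the start of the 0x400 byte
 * MMIO window and follow the ColdFire FEC register map (EIR at 0x004,
 * EIMR at 0x008, and so on).  Unimplemented registers read back as fixed
 * values.
 */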
static uint64_t mcf_fec_read(void *opaque, target_phys_addr_t addr,
                             unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004: return s->eir; /* EIR */
    case 0x008: return s->eimr; /* EIMR */
    case 0x010: return s->rx_enabled ? (1 << 24) : 0; /* RDAR */
    case 0x014: return 0; /* TDAR */
    case 0x024: return s->ecr; /* ECR */
    case 0x040: return s->mmfr; /* MMFR */
    case 0x044: return s->mscr; /* MSCR */
    case 0x064: return 0; /* MIBC */
    case 0x084: return s->rcr; /* RCR */
    case 0x0c4: return s->tcr; /* TCR */
    case 0x0e4: /* PALR */
        return (s->conf.macaddr.a[0] << 24) | (s->conf.macaddr.a[1] << 16)
              | (s->conf.macaddr.a[2] << 8) | s->conf.macaddr.a[3];
    case 0x0e8: /* PAUR */
        return (s->conf.macaddr.a[4] << 24) | (s->conf.macaddr.a[5] << 16) | 0x8808;
    case 0x0ec: return 0x10000; /* OPD */
    case 0x118: return 0; /* IAUR */
    case 0x11c: return 0; /* IALR */
    case 0x120: return 0; /* GAUR */
    case 0x124: return 0; /* GALR */
    case 0x144: return s->tfwr; /* TFWR */
    case 0x14c: return 0x600; /* FRBR */
    case 0x150: return s->rfsr; /* FRSR */
    case 0x180: return s->erdsr; /* ERDSR */
    case 0x184: return s->etdsr; /* ETDSR */
    case 0x188: return s->emrbr; /* EMRBR */
    default:
        hw_error("mcf_fec_read: Bad address 0x%x\n", (int)addr);
        return 0;
    }
}

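/*
 * Register writes.  Most registers are simple latches; the interesting
 * side effects are: EIR is write-one-to-clear, RDAR re-evaluates RX
 * availability, TDAR kicks the transmitter, and setting the RESET bit in
 * ECR reinitialises the device.  mcf_fec_update() runs after every write
 * so the interrupt lines track EIR/EIMR changes immediately.
 */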
static void mcf_fec_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004: /* EIR */
        s->eir &= ~value;
        break;
    case 0x008: /* EIMR */
        s->eimr = value;
        break;
    case 0x010: /* RDAR */
        if ((s->ecr & FEC_EN) && !s->rx_enabled) {
            DPRINTF("RX enable\n");
            mcf_fec_enable_rx(s);
        }
        break;
    case 0x014: /* TDAR */
        if (s->ecr & FEC_EN) {
            mcf_fec_do_tx(s);
        }
        break;
    case 0x024: /* ECR */
        s->ecr = value;
        if (value & FEC_RESET) {
            DPRINTF("Reset\n");
            mcf_fec_reset(s);
        }
        if ((s->ecr & FEC_EN) == 0) {
            s->rx_enabled = 0;
        }
        break;
    case 0x040: /* MMFR */
        /* TODO: Implement MII. */
        s->mmfr = value;
        break;
    case 0x044: /* MSCR */
        s->mscr = value & 0xfe;
        break;
    case 0x064: /* MIBC */
        /* TODO: Implement MIB. */
        break;
    case 0x084: /* RCR */
        s->rcr = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case 0x0c4: /* TCR */
        /* We transmit immediately, so raise GRA immediately. */
        s->tcr = value;
        if (value & 1)
            s->eir |= FEC_INT_GRA;
        break;
    case 0x0e4: /* PALR */
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case 0x0e8: /* PAUR */
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case 0x0ec:
        /* OPD */
        break;
    case 0x118: /* IAUR */
    case 0x11c: /* IALR */
    case 0x120: /* GAUR */
    case 0x124: /* GALR */
        /* TODO: implement MAC hash filtering. */
        break;
    case 0x144: /* TFWR */
        s->tfwr = value & 3;
        break;
    case 0x14c:
        /* FRBR writes ignored. */
        break;
    case 0x150: /* FRSR */
        s->rfsr = (value & 0x3fc) | 0x400;
        break;
    case 0x180: /* ERDSR */
        s->erdsr = value & ~3;
        s->rx_descriptor = s->erdsr;
        break;
    case 0x184: /* ETDSR */
        s->etdsr = value & ~3;
        s->tx_descriptor = s->etdsr;
        break;
    case 0x188: /* EMRBR */
        s->emrbr = value & 0x7f0;
        break;
    default:
        hw_error("mcf_fec_write: Bad address 0x%x\n", (int)addr);
    }
    mcf_fec_update(s);
}

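/*
 * Flow control hook for the network layer: packets are accepted only
 * while the current RX descriptor is empty.
 */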
static int mcf_fec_can_receive(VLANClientState *nc)
{
    mcf_fec_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
    return s->rx_enabled;
}

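/*
 * Receive path: append the 4-byte FCS, then scatter the frame across RX
 * descriptors of at most EMRBR bytes each, starting at s->rx_descriptor.
 * The final descriptor gets the L bit plus any error flags (truncated or
 * over-length frames), and RXF/RXB events are raised accordingly.
 */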
static ssize_t mcf_fec_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
{
    mcf_fec_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
    mcf_fec_bd bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;

    DPRINTF("do_rx len %d\n", size);
    if (!s->rx_enabled) {
        fprintf(stderr, "mcf_fec_receive: Unexpected packet\n");
    }
    /* Compute the FCS over the payload only; buf does not contain the
       4 CRC bytes that are accounted for below. */
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *)&crc;
    /* 4 bytes for the CRC. */
    size += 4;
    /* Huge frames are truncated. */
    if (size > FEC_MAX_FRAME_SIZE) {
        size = FEC_MAX_FRAME_SIZE;
        flags |= FEC_BD_TR | FEC_BD_LG;
    }
    /* Frames larger than the user limit just set error flags. */
    if (size > (s->rcr >> 16)) {
        flags |= FEC_BD_LG;
    }
    addr = s->rx_descriptor;
    while (size > 0) {
        mcf_fec_read_bd(&bd, addr);
        if ((bd.flags & FEC_BD_E) == 0) {
            /* No descriptors available.  Bail out. */
            /* FIXME: This is wrong.  We should probably either save the
               remainder for when more RX buffers are available, or
               flag an error. */
            fprintf(stderr, "mcf_fec: Lost end of frame\n");
            break;
        }
        buf_len = (size <= s->emrbr) ? size : s->emrbr;
        bd.length = buf_len;
        size -= buf_len;
        DPRINTF("rx_bd %x length %d\n", addr, bd.length);
        /* The last 4 bytes are the CRC. */
        if (size < 4)
            buf_len += size - 4;
        buf_addr = bd.data;
        cpu_physical_memory_write(buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            cpu_physical_memory_write(buf_addr + buf_len, crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~FEC_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | FEC_BD_L;
            DPRINTF("rx frame flags %04x\n", bd.flags);
            s->eir |= FEC_INT_RXF;
        } else {
            s->eir |= FEC_INT_RXB;
        }
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->erdsr;
        } else {
            addr += 8;
        }
    }
    s->rx_descriptor = addr;
    mcf_fec_enable_rx(s);
    mcf_fec_update(s);
    return size;
}

static const MemoryRegionOps mcf_fec_ops = {
    .read = mcf_fec_read,
    .write = mcf_fec_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void mcf_fec_cleanup(VLANClientState *nc)
{
    mcf_fec_state *s = DO_UPCAST(NICState, nc, nc)->opaque;

    memory_region_del_subregion(s->sysmem, &s->iomem);
    memory_region_destroy(&s->iomem);

    g_free(s);
}

static NetClientInfo net_mcf_fec_info = {
    .type = NET_CLIENT_TYPE_NIC,
    .size = sizeof(NICState),
    .can_receive = mcf_fec_can_receive,
    .receive = mcf_fec_receive,
    .cleanup = mcf_fec_cleanup,
};

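/*
 * Create and map the FEC.  The caller supplies the system memory region,
 * the NIC configuration, the base address of the 0x400 byte register
 * window and an array of FEC_NUM_IRQ interrupt lines (one per entry in
 * mcf_fec_irq_map).  A board model would typically wire it up roughly as
 * follows (sketch only; fec_base and fec_irqs stand in for the board's
 * own values):
 *
 *     if (nd_table[0].used) {
 *         mcf_fec_init(address_space_mem, &nd_table[0],
 *                      fec_base, fec_irqs);
 *     }
 */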
void mcf_fec_init(MemoryRegion *sysmem, NICInfo *nd,
                  target_phys_addr_t base, qemu_irq *irq)
{
    mcf_fec_state *s;

    qemu_check_nic_model(nd, "mcf_fec");

    s = (mcf_fec_state *)g_malloc0(sizeof(mcf_fec_state));
    s->sysmem = sysmem;
    s->irq = irq;

    memory_region_init_io(&s->iomem, &mcf_fec_ops, s, "fec", 0x400);
    memory_region_add_subregion(sysmem, base, &s->iomem);

    s->conf.macaddr = nd->macaddr;
    s->conf.vlan = nd->vlan;
    s->conf.peer = nd->netdev;

    s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf, nd->model, nd->name, s);

    qemu_format_nic_info_str(&s->nic->nc, s->conf.macaddr.a);
}