]>
Commit | Line | Data |
---|---|---|
c110425d MCA |
1 | /* |
2 | * QEMU Sun Happy Meal Ethernet emulation | |
3 | * | |
4 | * Copyright (c) 2017 Mark Cave-Ayland | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
7 | * of this software and associated documentation files (the "Software"), to deal | |
8 | * in the Software without restriction, including without limitation the rights | |
9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
10 | * copies of the Software, and to permit persons to whom the Software is | |
11 | * furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
22 | * THE SOFTWARE. | |
23 | */ | |
24 | ||
25 | #include "qemu/osdep.h" | |
26 | #include "hw/hw.h" | |
27 | #include "hw/pci/pci.h" | |
28 | #include "hw/net/mii.h" | |
29 | #include "net/net.h" | |
0b8fa32f | 30 | #include "qemu/module.h" |
c110425d MCA |
31 | #include "net/checksum.h" |
32 | #include "net/eth.h" | |
33 | #include "sysemu/sysemu.h" | |
34 | #include "trace.h" | |
35 | ||
/* Total size of the register space exposed through PCI BAR 0 */
#define HME_REG_SIZE                   0x8000

/* SEB (global status/reset/interrupt) register bank */
#define HME_SEB_REG_SIZE               0x2000

#define HME_SEBI_RESET                 0x0      /* software reset register */
#define HME_SEB_RESET_ETX              0x1      /* request TX block reset */
#define HME_SEB_RESET_ERX              0x2      /* request RX block reset */

#define HME_SEBI_STAT                  0x100    /* interrupt status */
#define HME_SEBI_STAT_LINUXBUG         0x108    /* wrong offset used by Linux < 4.13 */
#define HME_SEB_STAT_RXTOHOST          0x10000  /* frame received into host memory */
#define HME_SEB_STAT_MIFIRQ            0x800000 /* MIF (PHY management) interrupt */
#define HME_SEB_STAT_HOSTTOTX          0x1000000 /* TX descriptor consumed */
#define HME_SEB_STAT_TXALL             0x2000000 /* TX ring fully drained */

#define HME_SEBI_IMASK                 0x104    /* interrupt mask */
#define HME_SEBI_IMASK_LINUXBUG        0x10c    /* wrong offset used by Linux < 4.13 */

/* ETX (transmit DMA engine) register bank */
#define HME_ETX_REG_SIZE               0x2000

#define HME_ETXI_PENDING               0x0      /* non-zero write kicks off TX */

#define HME_ETXI_RING                  0x8      /* TX ring base + current index */
#define HME_ETXI_RING_ADDR             0xffffff00
#define HME_ETXI_RING_OFFSET           0xff

#define HME_ETXI_RSIZE                 0x2c     /* TX ring size register */

/* ERX (receive DMA engine) register bank */
#define HME_ERX_REG_SIZE               0x2000

#define HME_ERXI_CFG                   0x0
#define HME_ERX_CFG_RINGSIZE           0x600    /* 2-bit encoded ring size */
#define HME_ERX_CFG_RINGSIZE_SHIFT     9
#define HME_ERX_CFG_BYTEOFFSET         0x38     /* data offset within RX buffer */
#define HME_ERX_CFG_BYTEOFFSET_SHIFT   3
#define HME_ERX_CFG_CSUMSTART          0x7f0000 /* checksum start, in half-words */
#define HME_ERX_CFG_CSUMSHIFT          16

#define HME_ERXI_RING                  0x4      /* RX ring base + current index */
#define HME_ERXI_RING_ADDR             0xffffff00
#define HME_ERXI_RING_OFFSET           0xff

/* MAC (BigMAC) register bank */
#define HME_MAC_REG_SIZE               0x1000

#define HME_MACI_TXCFG                 0x20c
#define HME_MAC_TXCFG_ENABLE           0x1

#define HME_MACI_RXCFG                 0x30c
#define HME_MAC_RXCFG_ENABLE           0x1
#define HME_MAC_RXCFG_PMISC            0x40     /* promiscuous mode */
#define HME_MAC_RXCFG_HENABLE          0x800    /* multicast hash filter enable */

/* Station MAC address, two bytes per register, ADDR0 holds the first two */
#define HME_MACI_MACADDR2              0x318
#define HME_MACI_MACADDR1              0x31c
#define HME_MACI_MACADDR0              0x320

/* 64-bit multicast hash filter, 16 bits per register */
#define HME_MACI_HASHTAB3              0x340
#define HME_MACI_HASHTAB2              0x344
#define HME_MACI_HASHTAB1              0x348
#define HME_MACI_HASHTAB0              0x34c

/* MIF (MII management interface) register bank */
#define HME_MIF_REG_SIZE               0x20

#define HME_MIFI_FO                    0xc      /* frame output (MII command) */
#define HME_MIF_FO_ST                  0xc0000000 /* start-of-frame field */
#define HME_MIF_FO_ST_SHIFT            30
#define HME_MIF_FO_OPC                 0x30000000 /* opcode (read/write) */
#define HME_MIF_FO_OPC_SHIFT           28
#define HME_MIF_FO_PHYAD               0x0f800000 /* PHY address */
#define HME_MIF_FO_PHYAD_SHIFT         23
#define HME_MIF_FO_REGAD               0x007c0000 /* PHY register address */
#define HME_MIF_FO_REGAD_SHIFT         18
#define HME_MIF_FO_TAMSB               0x20000  /* turnaround MSB */
#define HME_MIF_FO_TALSB               0x10000  /* turnaround LSB; set = done */
#define HME_MIF_FO_DATA                0xffff   /* 16-bit data field */

#define HME_MIFI_CFG                   0x10
#define HME_MIF_CFG_MDI0               0x100    /* read-only: transceiver 0 present */
#define HME_MIF_CFG_MDI1               0x200    /* read-only: transceiver 1 present */

#define HME_MIFI_IMASK                 0x14     /* MIF interrupt mask */

#define HME_MIFI_STAT                  0x18     /* MIF interrupt status */


/* Wired HME PHY addresses */
#define HME_PHYAD_INTERNAL    1
#define HME_PHYAD_EXTERNAL    0

/* MII frame field values (IEEE 802.3 clause 22 management frames) */
#define MII_COMMAND_START     0x1
#define MII_COMMAND_READ      0x2
#define MII_COMMAND_WRITE     0x1

#define TYPE_SUNHME "sunhme"
#define SUNHME(obj) OBJECT_CHECK(SunHMEState, (obj), TYPE_SUNHME)

/* Maximum size of buffer */
#define HME_FIFO_SIZE         0x800

/* Size of TX/RX descriptor */
#define HME_DESC_SIZE         0x8

/* Descriptor status word bits (shared between TX and RX, hence "XD") */
#define HME_XD_OWN            0x80000000 /* descriptor owned by the device */
#define HME_XD_OFL            0x40000000 /* RX: buffer overflow */
#define HME_XD_SOP            0x40000000 /* TX: start of packet */
#define HME_XD_EOP            0x20000000 /* TX: end of packet */
#define HME_XD_RXLENMSK       0x3fff0000
#define HME_XD_RXLENSHIFT     16
#define HME_XD_RXCKSUM        0xffff
#define HME_XD_TXLENMSK       0x00001fff
#define HME_XD_TXCKSUM        0x10000000 /* TX: request checksum offload */
#define HME_XD_TXCSSTUFF      0xff00000  /* TX: checksum stuff byte offset */
#define HME_XD_TXCSSTUFFSHIFT 20
#define HME_XD_TXCSSTART      0xfc000    /* TX: checksum start byte offset */
#define HME_XD_TXCSSTARTSHIFT 14

#define HME_MII_REGS_SIZE     0x20
/* Device state for the emulated Sun Happy Meal (HME) PCI ethernet card */
typedef struct SunHMEState {
    /*< private >*/
    PCIDevice parent_obj;

    NICState *nic;          /* network backend handle */
    NICConf conf;           /* user-supplied NIC configuration (MAC etc.) */

    MemoryRegion hme;       /* BAR 0 container covering all register banks */
    MemoryRegion sebreg;    /* SEB bank, mapped at offset 0x0 */
    MemoryRegion etxreg;    /* ETX bank, mapped at offset 0x2000 */
    MemoryRegion erxreg;    /* ERX bank, mapped at offset 0x4000 */
    MemoryRegion macreg;    /* MAC bank, mapped at offset 0x6000 */
    MemoryRegion mifreg;    /* MIF bank, mapped at offset 0x7000 */

    /* Register banks stored as 32-bit words, indexed by (byte offset >> 2) */
    uint32_t sebregs[HME_SEB_REG_SIZE >> 2];
    uint32_t etxregs[HME_ETX_REG_SIZE >> 2];
    uint32_t erxregs[HME_ERX_REG_SIZE >> 2];
    uint32_t macregs[HME_MAC_REG_SIZE >> 2];
    uint32_t mifregs[HME_MIF_REG_SIZE >> 2];

    /* Internal PHY registers, indexed directly by MII register number */
    uint16_t miiregs[HME_MII_REGS_SIZE];
} SunHMEState;
176 | ||
/* User-configurable device properties (standard NIC properties: mac, netdev) */
static Property sunhme_properties[] = {
    DEFINE_NIC_PROPERTIES(SunHMEState, conf),
    DEFINE_PROP_END_OF_LIST(),
};
181 | ||
182 | static void sunhme_reset_tx(SunHMEState *s) | |
183 | { | |
184 | /* Indicate TX reset complete */ | |
185 | s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX; | |
186 | } | |
187 | ||
188 | static void sunhme_reset_rx(SunHMEState *s) | |
189 | { | |
190 | /* Indicate RX reset complete */ | |
191 | s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX; | |
192 | } | |
193 | ||
/*
 * Recompute the PCI interrupt line level from the SEB and MIF status
 * registers against their respective interrupt masks. Any unmasked MIF
 * status bit is folded into the SEB status as HME_SEB_STAT_MIFIRQ.
 */
static void sunhme_update_irq(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int level;

    /* MIF interrupt mask (16-bit); a set mask bit disables the interrupt */
    uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff;
    uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask;

    /* Main SEB interrupt mask (include MIF status from above) */
    uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) &
                       ~HME_SEB_STAT_MIFIRQ;
    uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask;
    if (mif) {
        seb |= HME_SEB_STAT_MIFIRQ;
    }

    /* Assert the line if any unmasked status bit remains */
    level = (seb ? 1 : 0);
    trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level);

    pci_set_irq(d, level);
}
216 | ||
/* Guest write to the SEB register bank */
static void sunhme_seb_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_seb_write(addr, val);

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    switch (addr) {
    case HME_SEBI_RESET:
        /* Reset requests complete synchronously; each handler clears its
           request bit in the reset register */
        if (val & HME_SEB_RESET_ETX) {
            sunhme_reset_tx(s);
        }
        if (val & HME_SEB_RESET_ERX) {
            sunhme_reset_rx(s);
        }
        /* Store the post-reset register value rather than the guest's */
        val = s->sebregs[HME_SEBI_RESET >> 2];
        break;
    }

    s->sebregs[addr >> 2] = val;
}
251 | ||
/* Guest read from the SEB register bank */
static uint64_t sunhme_seb_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    val = s->sebregs[addr >> 2];

    switch (addr) {
    case HME_SEBI_STAT:
        /* Autoclear status on read (except MIF, which autoclears on a
           MIF status read instead) and re-evaluate the IRQ line */
        s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_seb_read(addr, val);

    return val;
}
285 | ||
/* SEB bank MMIO ops: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_seb_ops = {
    .read = sunhme_seb_read,
    .write = sunhme_seb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
295 | ||
296 | static void sunhme_transmit(SunHMEState *s); | |
297 | ||
298 | static void sunhme_etx_write(void *opaque, hwaddr addr, | |
299 | uint64_t val, unsigned size) | |
300 | { | |
301 | SunHMEState *s = SUNHME(opaque); | |
302 | ||
303 | trace_sunhme_etx_write(addr, val); | |
304 | ||
305 | switch (addr) { | |
306 | case HME_ETXI_PENDING: | |
307 | if (val) { | |
308 | sunhme_transmit(s); | |
309 | } | |
310 | break; | |
311 | } | |
312 | ||
313 | s->etxregs[addr >> 2] = val; | |
314 | } | |
315 | ||
316 | static uint64_t sunhme_etx_read(void *opaque, hwaddr addr, | |
317 | unsigned size) | |
318 | { | |
319 | SunHMEState *s = SUNHME(opaque); | |
320 | uint64_t val; | |
321 | ||
322 | val = s->etxregs[addr >> 2]; | |
323 | ||
324 | trace_sunhme_etx_read(addr, val); | |
325 | ||
326 | return val; | |
327 | } | |
328 | ||
/* ETX bank MMIO ops: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_etx_ops = {
    .read = sunhme_etx_read,
    .write = sunhme_etx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
338 | ||
339 | static void sunhme_erx_write(void *opaque, hwaddr addr, | |
340 | uint64_t val, unsigned size) | |
341 | { | |
342 | SunHMEState *s = SUNHME(opaque); | |
343 | ||
344 | trace_sunhme_erx_write(addr, val); | |
345 | ||
346 | s->erxregs[addr >> 2] = val; | |
347 | } | |
348 | ||
349 | static uint64_t sunhme_erx_read(void *opaque, hwaddr addr, | |
350 | unsigned size) | |
351 | { | |
352 | SunHMEState *s = SUNHME(opaque); | |
353 | uint64_t val; | |
354 | ||
355 | val = s->erxregs[addr >> 2]; | |
356 | ||
357 | trace_sunhme_erx_read(addr, val); | |
358 | ||
359 | return val; | |
360 | } | |
361 | ||
/* ERX bank MMIO ops: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_erx_ops = {
    .read = sunhme_erx_read,
    .write = sunhme_erx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
371 | ||
372 | static void sunhme_mac_write(void *opaque, hwaddr addr, | |
373 | uint64_t val, unsigned size) | |
374 | { | |
375 | SunHMEState *s = SUNHME(opaque); | |
376 | ||
377 | trace_sunhme_mac_write(addr, val); | |
378 | ||
379 | s->macregs[addr >> 2] = val; | |
380 | } | |
381 | ||
382 | static uint64_t sunhme_mac_read(void *opaque, hwaddr addr, | |
383 | unsigned size) | |
384 | { | |
385 | SunHMEState *s = SUNHME(opaque); | |
386 | uint64_t val; | |
387 | ||
388 | val = s->macregs[addr >> 2]; | |
389 | ||
390 | trace_sunhme_mac_read(addr, val); | |
391 | ||
392 | return val; | |
393 | } | |
394 | ||
/* MAC bank MMIO ops: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_mac_ops = {
    .read = sunhme_mac_read,
    .write = sunhme_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
404 | ||
/*
 * Write a register of the emulated internal PHY. BMCR writes model reset
 * and auto-negotiation completing instantly.
 */
static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data)
{
    trace_sunhme_mii_write(reg, data);

    switch (reg) {
    case MII_BMCR:
        if (data & MII_BMCR_RESET) {
            /* Autoclear reset bit, enable auto negotiation */
            data &= ~MII_BMCR_RESET;
            data |= MII_BMCR_AUTOEN;
        }
        if (data & MII_BMCR_ANRESTART) {
            /* Autoclear auto negotiation restart */
            data &= ~MII_BMCR_ANRESTART;

            /* Indicate negotiation complete */
            s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP;

            if (!qemu_get_queue(s->nic)->link_down) {
                /* Link is up: partner advertises 100Mbps full duplex */
                s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
                s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
            }
        }
        break;
    }

    s->miiregs[reg] = data;
}
433 | ||
434 | static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg) | |
435 | { | |
436 | uint16_t data = s->miiregs[reg]; | |
437 | ||
438 | trace_sunhme_mii_read(reg, data); | |
439 | ||
440 | return data; | |
441 | } | |
442 | ||
/*
 * Guest write to the MIF register bank. Writes to the frame output
 * register decode and execute an MII management command against the
 * internal PHY; setting HME_MIF_FO_TALSB signals completion.
 */
static void sunhme_mif_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint8_t cmd, reg;
    uint16_t data;

    trace_sunhme_mif_write(addr, val);

    switch (addr) {
    case HME_MIFI_CFG:
        /* Mask the read-only bits (transceiver presence indicators) */
        val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        val |= s->mifregs[HME_MIFI_CFG >> 2] &
               (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        break;
    case HME_MIFI_FO:
        /* Detect start of MII command; anything else completes as a no-op */
        if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT
                != MII_COMMAND_START) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        /* Internal phy only */
        if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT
                != HME_PHYAD_INTERNAL) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        /* Extract the opcode, PHY register number and data fields */
        cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT;
        reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT;
        data = (val & HME_MIF_FO_DATA);

        switch (cmd) {
        case MII_COMMAND_WRITE:
            sunhme_mii_write(s, reg, data);
            break;

        case MII_COMMAND_READ:
            /* Replace the data field with the value read from the PHY */
            val &= ~HME_MIF_FO_DATA;
            val |= sunhme_mii_read(s, reg);
            break;
        }

        /* Mark the command as complete */
        val |= HME_MIF_FO_TALSB;
        break;
    }

    s->mifregs[addr >> 2] = val;
}
495 | ||
496 | static uint64_t sunhme_mif_read(void *opaque, hwaddr addr, | |
497 | unsigned size) | |
498 | { | |
499 | SunHMEState *s = SUNHME(opaque); | |
500 | uint64_t val; | |
501 | ||
502 | val = s->mifregs[addr >> 2]; | |
503 | ||
504 | switch (addr) { | |
505 | case HME_MIFI_STAT: | |
506 | /* Autoclear MIF interrupt status */ | |
507 | s->mifregs[HME_MIFI_STAT >> 2] = 0; | |
508 | sunhme_update_irq(s); | |
509 | break; | |
510 | } | |
511 | ||
512 | trace_sunhme_mif_read(addr, val); | |
513 | ||
514 | return val; | |
515 | } | |
516 | ||
/* MIF bank MMIO ops: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_mif_ops = {
    .read = sunhme_mif_read,
    .write = sunhme_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
526 | ||
527 | static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size) | |
528 | { | |
529 | qemu_send_packet(qemu_get_queue(s->nic), buf, size); | |
530 | } | |
531 | ||
532 | static inline int sunhme_get_tx_ring_count(SunHMEState *s) | |
533 | { | |
534 | return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4; | |
535 | } | |
536 | ||
537 | static inline int sunhme_get_tx_ring_nr(SunHMEState *s) | |
538 | { | |
539 | return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET; | |
540 | } | |
541 | ||
542 | static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i) | |
543 | { | |
544 | uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET; | |
545 | ring |= i & HME_ETXI_RING_OFFSET; | |
546 | ||
547 | s->etxregs[HME_ETXI_RING >> 2] = ring; | |
548 | } | |
549 | ||
/*
 * Walk the TX descriptor ring and transmit every frame the guest has
 * handed over (descriptors with HME_XD_OWN set), performing the optional
 * TX checksum offload. Each descriptor is two 32-bit words in guest
 * memory: a status/flags word and a buffer address.
 */
static void sunhme_transmit(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t tb, addr;
    uint32_t intstatus, status, buffer, sum = 0;
    int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0;
    uint16_t csum = 0;
    uint8_t xmit_buffer[HME_FIFO_SIZE];

    /* Ring base address, descriptor count and current descriptor index */
    tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR;
    nr = sunhme_get_tx_ring_count(s);
    cr = sunhme_get_tx_ring_nr(s);

    pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    /* NOTE(review): xmit_pos is only initialised here and never reset at
       SOP/EOP, so a second packet processed in the same call appends after
       the first in xmit_buffer — confirm this is the intended behaviour */
    xmit_pos = 0;
    while (status & HME_XD_OWN) {
        trace_sunhme_tx_desc(buffer, status, cr, nr);

        /* Copy data into transmit buffer */
        addr = buffer;
        len = status & HME_XD_TXLENMSK;

        /* Clamp so a bogus descriptor length can't overflow xmit_buffer */
        if (xmit_pos + len > HME_FIFO_SIZE) {
            len = HME_FIFO_SIZE - xmit_pos;
        }

        pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len);
        xmit_pos += len;

        /* Detect start of packet for TX checksum */
        if (status & HME_XD_SOP) {
            sum = 0;
            csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT;
            csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >>
                                HME_XD_TXCSSTUFFSHIFT;
        }

        if (status & HME_XD_TXCKSUM) {
            /* Only start calculation from csum_offset */
            if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) {
                /* First fragment that reaches past csum_offset: sum from
                   csum_offset up to the current position */
                sum += net_checksum_add(xmit_pos - csum_offset,
                                        xmit_buffer + csum_offset);
                trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset);
            } else {
                /* Subsequent fragments: sum the whole fragment */
                sum += net_checksum_add(len, xmit_buffer + xmit_pos - len);
                trace_sunhme_tx_xsum_add(xmit_pos - len, len);
            }
        }

        /* Detect end of packet for TX checksum */
        if (status & HME_XD_EOP) {
            /* Stuff the checksum if required */
            if (status & HME_XD_TXCKSUM) {
                csum = net_checksum_finish(sum);
                stw_be_p(xmit_buffer + csum_stuff_offset, csum);
                trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset);
            }

            /* Only send the frame when the MAC TX path is enabled */
            if (s->macregs[HME_MACI_TXCFG >> 2] & HME_MAC_TXCFG_ENABLE) {
                sunhme_transmit_frame(s, xmit_buffer, xmit_pos);
                trace_sunhme_tx_done(xmit_pos);
            }
        }

        /* Update status: return descriptor ownership to the guest */
        status &= ~HME_XD_OWN;
        pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4);

        /* Move onto next descriptor, wrapping at the ring size */
        cr++;
        if (cr >= nr) {
            cr = 0;
        }
        sunhme_set_tx_ring_nr(s, cr);

        pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
        pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

        /* Indicate TX complete */
        intstatus = s->sebregs[HME_SEBI_STAT >> 2];
        intstatus |= HME_SEB_STAT_HOSTTOTX;
        s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

        /* Autoclear TX pending */
        s->etxregs[HME_ETXI_PENDING >> 2] = 0;

        sunhme_update_irq(s);
    }

    /* TX FIFO now clear */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_TXALL;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
    sunhme_update_irq(s);
}
647 | ||
648 | static int sunhme_can_receive(NetClientState *nc) | |
649 | { | |
650 | SunHMEState *s = qemu_get_nic_opaque(nc); | |
651 | ||
652 | return s->macregs[HME_MAC_RXCFG_ENABLE >> 2] & HME_MAC_RXCFG_ENABLE; | |
653 | } | |
654 | ||
655 | static void sunhme_link_status_changed(NetClientState *nc) | |
656 | { | |
657 | SunHMEState *s = qemu_get_nic_opaque(nc); | |
658 | ||
659 | if (nc->link_down) { | |
660 | s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD; | |
661 | s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST; | |
662 | } else { | |
663 | s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD; | |
664 | s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST; | |
665 | } | |
666 | ||
667 | /* Exact bits unknown */ | |
668 | s->mifregs[HME_MIFI_STAT >> 2] = 0xffff; | |
669 | sunhme_update_irq(s); | |
670 | } | |
671 | ||
672 | static inline int sunhme_get_rx_ring_count(SunHMEState *s) | |
673 | { | |
674 | uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE) | |
675 | >> HME_ERX_CFG_RINGSIZE_SHIFT; | |
676 | ||
677 | switch (rings) { | |
678 | case 0: | |
679 | return 32; | |
680 | case 1: | |
681 | return 64; | |
682 | case 2: | |
683 | return 128; | |
684 | case 3: | |
685 | return 256; | |
686 | } | |
687 | ||
688 | return 0; | |
689 | } | |
690 | ||
691 | static inline int sunhme_get_rx_ring_nr(SunHMEState *s) | |
692 | { | |
693 | return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET; | |
694 | } | |
695 | ||
696 | static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i) | |
697 | { | |
698 | uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET; | |
699 | ring |= i & HME_ERXI_RING_OFFSET; | |
700 | ||
701 | s->erxregs[HME_ERXI_RING >> 2] = ring; | |
702 | } | |
703 | ||
c110425d MCA |
704 | #define MIN_BUF_SIZE 60 |
705 | ||
/*
 * Backend callback: receive a packet. Applies the destination MAC /
 * broadcast / multicast-hash filters, DMAs the frame into the next RX
 * descriptor's buffer, computes the RX checksum and raises the RX
 * interrupt. Returns the number of bytes consumed, 0 if filtered out,
 * or -1 if RX is disabled.
 */
static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t rb, addr;
    uint32_t intstatus, status, buffer, buffersize, sum;
    uint16_t csum;
    uint8_t buf1[60];    /* padding buffer, sized to MIN_BUF_SIZE */
    int nr, cr, len, rxoffset, csum_offset;

    trace_sunhme_rx_incoming(size);

    /* Do nothing if MAC RX disabled */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) {
        return -1;
    }

    trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2],
                                   buf[3], buf[4], buf[5]);

    /* Check destination MAC address (skipped in promiscuous mode) */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) {
        /* Try and match local MAC address: each MACADDRn register holds
           two bytes of the station address, MACADDR0 the first two */
        if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] &&
             (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] &&
            ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] &&
             (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] &&
            ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] &&
             (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) {
            /* Matched local MAC address */
            trace_sunhme_rx_filter_local_match();
        } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff &&
                   buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) {
            /* Matched broadcast address */
            trace_sunhme_rx_filter_bcast_match();
        } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) {
            /* Didn't match local address, check hash filter: the top 6
               bits of the little-endian CRC32 of the destination MAC
               select one of 64 bits spread over the four HASHTAB
               registers, 16 bits per register */
            int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26;
            if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] &
                    (1 << (mcast_idx & 0xf)))) {
                /* Didn't match hash filter */
                trace_sunhme_rx_filter_hash_nomatch();
                trace_sunhme_rx_filter_reject();
                return 0;
            } else {
                trace_sunhme_rx_filter_hash_match();
            }
        } else {
            /* Not for us */
            trace_sunhme_rx_filter_reject();
            return 0;
        }
    } else {
        trace_sunhme_rx_filter_promisc_match();
    }

    trace_sunhme_rx_filter_accept();

    /* If too small buffer, then zero-pad it up to the ethernet minimum */
    if (size < MIN_BUF_SIZE) {
        memcpy(buf1, buf, size);
        memset(buf1 + size, 0, MIN_BUF_SIZE - size);
        buf = buf1;
        size = MIN_BUF_SIZE;
    }

    /* RX ring base address, descriptor count and current index */
    rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR;
    nr = sunhme_get_rx_ring_count(s);
    cr = sunhme_get_rx_ring_nr(s);

    pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    /* Guest-configured offset of the packet data within the RX buffer */
    rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >>
               HME_ERX_CFG_BYTEOFFSET_SHIFT;

    addr = buffer + rxoffset;
    buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT;

    /* Detect receive overflow: truncate and flag if the frame is larger
       than the descriptor's buffer */
    len = size;
    if (size > buffersize) {
        status |= HME_XD_OFL;
        len = buffersize;
    }

    pci_dma_write(d, addr, buf, len);

    trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr);

    /* Calculate the receive checksum. CSUMSTART is in half-words, so the
       extra "<< 1" (applied left-to-right after the field shift) converts
       it to a byte offset */
    csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >>
                  HME_ERX_CFG_CSUMSHIFT << 1;
    sum = 0;
    sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset);
    csum = net_checksum_finish(sum);

    trace_sunhme_rx_xsum_calc(csum);

    /* Update status: hand the descriptor back to the guest with the
       actual length and the computed checksum */
    status &= ~HME_XD_OWN;
    status &= ~HME_XD_RXLENMSK;
    status |= len << HME_XD_RXLENSHIFT;
    status &= ~HME_XD_RXCKSUM;
    status |= csum;

    pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4);

    /* Advance to the next descriptor, wrapping at the ring size */
    cr++;
    if (cr >= nr) {
        cr = 0;
    }

    sunhme_set_rx_ring_nr(s, cr);

    /* Indicate RX complete */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_RXTOHOST;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

    sunhme_update_irq(s);

    return len;
}
831 | ||
/* Callbacks linking the network backend to this device model */
static NetClientInfo net_sunhme_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sunhme_can_receive,
    .receive = sunhme_receive,
    .link_status_changed = sunhme_link_status_changed,
};
839 | ||
/*
 * Realize: build the PCI BAR 0 register space out of the five register
 * sub-banks and create the NIC backend connection.
 */
static void sunhme_realize(PCIDevice *pci_dev, Error **errp)
{
    SunHMEState *s = SUNHME(pci_dev);
    DeviceState *d = DEVICE(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;    /* interrupt pin A */

    /* BAR 0 is a container region holding the individual register banks */
    memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme);

    memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s,
                          "sunhme.seb", HME_SEB_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0, &s->sebreg);

    memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s,
                          "sunhme.etx", HME_ETX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg);

    memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s,
                          "sunhme.erx", HME_ERX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg);

    memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s,
                          "sunhme.mac", HME_MAC_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x6000, &s->macreg);

    memory_region_init_io(&s->mifreg, OBJECT(pci_dev), &sunhme_mif_ops, s,
                          "sunhme.mif", HME_MIF_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg);

    /* Create the NIC, defaulting the MAC address if the user set none */
    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sunhme_info, &s->conf,
                          object_get_typename(OBJECT(d)), d->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
877 | ||
static void sunhme_instance_init(Object *obj)
{
    SunHMEState *s = SUNHME(obj);

    /* Expose a "bootindex" property so the NIC can be selected for boot */
    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj), NULL);
}
886 | ||
/*
 * Device reset: set up the MIF/PHY registers and default interrupt masks.
 * NOTE(review): the register banks themselves are not cleared here —
 * presumably this relies on the zero-initialised device state; confirm
 * behaviour across a warm reset.
 */
static void sunhme_reset(DeviceState *ds)
{
    SunHMEState *s = SUNHME(ds);

    /* Configure internal transceiver */
    s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0;

    /* Advertise auto, 100Mbps FD */
    s->miiregs[MII_ANAR] = MII_ANAR_TXFD;
    s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD |
                           MII_BMSR_AN_COMP;

    if (!qemu_get_queue(s->nic)->link_down) {
        /* Reflect the current backend link state in the PHY registers */
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Set manufacturer */
    s->miiregs[MII_PHYID1] = DP83840_PHYID1;
    s->miiregs[MII_PHYID2] = DP83840_PHYID2;

    /* Configure default interrupt mask */
    s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff;
    s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff;
}
912 | ||
/* Migration state: PCI core state, MAC address and all register banks */
static const VMStateDescription vmstate_hme = {
    .name = "sunhme",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, SunHMEState),
        VMSTATE_MACADDR(conf.macaddr, SunHMEState),
        VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, (HME_MIF_REG_SIZE >> 2)),
        VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};
929 | ||
/* QOM class init: wire up realize, reset, migration and PCI identity */
static void sunhme_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sunhme_realize;
    /* PCI identity: Sun Microsystems Happy Meal ethernet controller */
    k->vendor_id = PCI_VENDOR_ID_SUN;
    k->device_id = PCI_DEVICE_ID_SUN_HME;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_hme;
    dc->reset = sunhme_reset;
    dc->props = sunhme_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}
944 | ||
/* QOM type registration data for the sunhme PCI device */
static const TypeInfo sunhme_info = {
    .name          = TYPE_SUNHME,
    .parent        = TYPE_PCI_DEVICE,
    .class_init    = sunhme_class_init,
    .instance_size = sizeof(SunHMEState),
    .instance_init = sunhme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};
956 | ||
/* Register the device type with QOM at module load time */
static void sunhme_register_types(void)
{
    type_register_static(&sunhme_info);
}

type_init(sunhme_register_types)