]> git.proxmox.com Git - mirror_qemu.git/blame - hw/net/spapr_llan.c
migration: Move the VMStateDescription typedef to typedefs.h
[mirror_qemu.git] / hw / net / spapr_llan.c
CommitLineData
8d90ad90
DG
1/*
2 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
3 *
4 * PAPR Inter-VM Logical Lan, aka ibmveth
5 *
6 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 *
26 */
0b8fa32f 27
0d75590d 28#include "qemu/osdep.h"
4771d756 29#include "cpu.h"
83c9f4ca 30#include "hw/hw.h"
64552b6b 31#include "hw/irq.h"
03dd024f 32#include "qemu/log.h"
0b8fa32f 33#include "qemu/module.h"
1422e32d 34#include "net/net.h"
8d90ad90 35#include "hw/qdev.h"
0d09e41a
PB
36#include "hw/ppc/spapr.h"
37#include "hw/ppc/spapr_vio.h"
ad4f62d0 38#include "sysemu/sysemu.h"
e8bb33de 39#include "trace.h"
8d90ad90
DG
40
41#include <libfdt.h>
42
43#define ETH_ALEN 6
44#define MAX_PACKET_SIZE 65536
45
831e8822
TH
46/* Compatibility flags for migration */
47#define SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT 0
48#define SPAPRVLAN_FLAG_RX_BUF_POOLS (1 << SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT)
49
8d90ad90
DG
50/*
51 * Virtual LAN device
52 */
53
54typedef uint64_t vlan_bd_t;
55
56#define VLAN_BD_VALID 0x8000000000000000ULL
57#define VLAN_BD_TOGGLE 0x4000000000000000ULL
58#define VLAN_BD_NO_CSUM 0x0200000000000000ULL
59#define VLAN_BD_CSUM_GOOD 0x0100000000000000ULL
60#define VLAN_BD_LEN_MASK 0x00ffffff00000000ULL
61#define VLAN_BD_LEN(bd) (((bd) & VLAN_BD_LEN_MASK) >> 32)
62#define VLAN_BD_ADDR_MASK 0x00000000ffffffffULL
63#define VLAN_BD_ADDR(bd) ((bd) & VLAN_BD_ADDR_MASK)
64
65#define VLAN_VALID_BD(addr, len) (VLAN_BD_VALID | \
66 (((len) << 32) & VLAN_BD_LEN_MASK) | \
67 (addr & VLAN_BD_ADDR_MASK))
68
69#define VLAN_RXQC_TOGGLE 0x80
70#define VLAN_RXQC_VALID 0x40
71#define VLAN_RXQC_NO_CSUM 0x02
72#define VLAN_RXQC_CSUM_GOOD 0x01
73
74#define VLAN_RQ_ALIGNMENT 16
75#define VLAN_RXQ_BD_OFF 0
76#define VLAN_FILTER_BD_OFF 8
77#define VLAN_RX_BDS_OFF 16
439ce140
AB
78/*
79 * The final 8 bytes of the buffer list is a counter of frames dropped
80 * because there was not a buffer in the buffer list capable of holding
81 * the frame. We must avoid it, or the operating system will report garbage
82 * for this statistic.
83 */
84#define VLAN_RX_BDS_LEN (SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF - 8)
85#define VLAN_MAX_BUFS (VLAN_RX_BDS_LEN / 8)
8d90ad90 86
fd506b4f
DG
#define TYPE_VIO_SPAPR_VLAN_DEVICE "spapr-vlan"
#define VIO_SPAPR_VLAN_DEVICE(obj) \
    OBJECT_CHECK(SpaprVioVlan, (obj), TYPE_VIO_SPAPR_VLAN_DEVICE)

/* Limits for the size-sorted receive buffer pools (rx_pool[] below) */
#define RX_POOL_MAX_BDS 4096
#define RX_MAX_POOLS 5

/* One pool of receive buffer descriptors, all sharing the same size */
typedef struct {
    int32_t bufsize;                /* buffer size this pool holds */
    int32_t count;                  /* number of valid entries in bds[] */
    vlan_bd_t bds[RX_POOL_MAX_BDS]; /* the buffer descriptors themselves */
} RxBufPool;

/* Per-instance state of one spapr-vlan (ibmveth-style) device */
typedef struct SpaprVioVlan {
    SpaprVioDevice sdev;    /* parent VIO device state (must be first) */
    NICConf nicconf;
    NICState *nic;
    MACAddr perm_mac;       /* MAC restored on device reset */
    bool isopen;            /* set by H_REGISTER_LOGICAL_LAN, cleared on free */
    hwaddr buf_list;        /* guest page holding rxq/filter/rx descriptors */
    uint32_t add_buf_ptr, use_buf_ptr, rx_bufs;
    hwaddr rxq_ptr;         /* current write offset into the receive queue */
    QEMUTimer *rxp_timer;   /* defers flushing queued rx packets */
    uint32_t compat_flags;             /* Compatibility flags for migration */
    RxBufPool *rx_pool[RX_MAX_POOLS];  /* Receive buffer descriptor pools */
} SpaprVioVlan;
8d90ad90 113
4e68f7a0 114static int spapr_vlan_can_receive(NetClientState *nc)
8d90ad90 115{
ce2918cb 116 SpaprVioVlan *dev = qemu_get_nic_opaque(nc);
8d90ad90
DG
117
118 return (dev->isopen && dev->rx_bufs > 0);
119}
120
5c29dd8c
TH
121/**
122 * The last 8 bytes of the receive buffer list page (that has been
123 * supplied by the guest with the H_REGISTER_LOGICAL_LAN call) contain
124 * a counter for frames that have been dropped because there was no
125 * suitable receive buffer available. This function is used to increase
126 * this counter by one.
127 */
ce2918cb 128static void spapr_vlan_record_dropped_rx_frame(SpaprVioVlan *dev)
5c29dd8c
TH
129{
130 uint64_t cnt;
131
132 cnt = vio_ldq(&dev->sdev, dev->buf_list + 4096 - 8);
133 vio_stq(&dev->sdev, dev->buf_list + 4096 - 8, cnt + 1);
134}
135
831e8822
TH
/**
 * Get buffer descriptor from one of our receive buffer pools.
 * Returns the descriptor, or 0 if no pool has a buffer large enough
 * (pools are sorted by ascending bufsize, so the first hit is smallest).
 */
static vlan_bd_t spapr_vlan_get_rx_bd_from_pool(SpaprVioVlan *dev,
                                                size_t size)
{
    vlan_bd_t bd;
    int pool;

    /* Find the first (smallest) pool that can hold size + 8 header bytes */
    for (pool = 0; pool < RX_MAX_POOLS; pool++) {
        if (dev->rx_pool[pool]->count > 0 &&
            dev->rx_pool[pool]->bufsize >= size + 8) {
            break;
        }
    }
    if (pool == RX_MAX_POOLS) {
        /* Failed to find a suitable buffer */
        return 0;
    }

    trace_spapr_vlan_get_rx_bd_from_pool_found(pool,
                                               dev->rx_pool[pool]->count,
                                               dev->rx_bufs);

    /* Remove the buffer from the pool (pools are used as LIFO stacks) */
    dev->rx_pool[pool]->count--;
    bd = dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count];
    dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count] = 0;

    return bd;
}
168
d6f39fdf
TH
/**
 * Get buffer descriptor from the receive buffer list page that has been
 * supplied by the guest with the H_REGISTER_LOGICAL_LAN call (the legacy,
 * non-pool mode). Scans the descriptor ring starting just after the last
 * used slot; returns the descriptor, or 0 if none is valid and big enough.
 */
static vlan_bd_t spapr_vlan_get_rx_bd_from_page(SpaprVioVlan *dev,
                                                size_t size)
{
    int buf_ptr = dev->use_buf_ptr;
    vlan_bd_t bd;

    /* Walk the ring at most once, wrapping at the end of the BD area */
    do {
        buf_ptr += 8;
        if (buf_ptr >= VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF) {
            buf_ptr = VLAN_RX_BDS_OFF;
        }

        bd = vio_ldq(&dev->sdev, dev->buf_list + buf_ptr);

        trace_spapr_vlan_get_rx_bd_from_page(buf_ptr, (uint64_t)bd);
    } while ((!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8)
             && buf_ptr != dev->use_buf_ptr);

    if (!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8) {
        /* Failed to find a suitable buffer */
        return 0;
    }

    /* Remove the buffer from the pool: zero the slot so it can be reused */
    dev->use_buf_ptr = buf_ptr;
    vio_stq(&dev->sdev, dev->buf_list + dev->use_buf_ptr, 0);

    trace_spapr_vlan_get_rx_bd_from_page_found(dev->use_buf_ptr, dev->rx_bufs);

    return bd;
}
204
/*
 * Net-core receive callback: DMA an incoming frame into a guest-supplied
 * buffer and post a completion entry on the guest's receive queue.
 * Returns size on success, 0 to requeue (no buffer; frame dropped counter
 * bumped), -1 on error.
 */
static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    SpaprVioVlan *dev = qemu_get_nic_opaque(nc);
    SpaprVioDevice *sdev = VIO_SPAPR_DEVICE(dev);
    vlan_bd_t rxq_bd = vio_ldq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF);
    vlan_bd_t bd;
    uint64_t handle;
    uint8_t control;

    trace_spapr_vlan_receive(sdev->qdev.id, dev->rx_bufs);

    if (!dev->isopen) {
        return -1;
    }

    if (!dev->rx_bufs) {
        /* Out of buffers: account the drop for the guest's statistics */
        spapr_vlan_record_dropped_rx_frame(dev);
        return 0;
    }

    /* Pick a receive buffer via the mode selected by the compat flag */
    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        bd = spapr_vlan_get_rx_bd_from_pool(dev, size);
    } else {
        bd = spapr_vlan_get_rx_bd_from_page(dev, size);
    }
    if (!bd) {
        spapr_vlan_record_dropped_rx_frame(dev);
        return 0;
    }

    dev->rx_bufs--;

    /* Transfer the packet data (first 8 bytes of the buffer are reserved) */
    if (spapr_vio_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) {
        return -1;
    }

    trace_spapr_vlan_receive_dma_completed();

    /* Update the receive queue: toggle bit tracks ring wrap parity */
    control = VLAN_RXQC_TOGGLE | VLAN_RXQC_VALID;
    if (rxq_bd & VLAN_BD_TOGGLE) {
        control ^= VLAN_RXQC_TOGGLE;
    }

    /*
     * Write the rxq entry back-to-front: the control byte (which makes the
     * entry valid for the guest) is stored last.
     */
    handle = vio_ldq(sdev, VLAN_BD_ADDR(bd));
    vio_stq(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8, handle);
    vio_stl(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 4, size);
    vio_sth(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8);
    vio_stb(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control);

    trace_spapr_vlan_receive_wrote(dev->rxq_ptr,
                                   vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
                                                 dev->rxq_ptr),
                                   vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
                                                 dev->rxq_ptr + 8));

    /* Advance the queue pointer (16-byte entries), flipping toggle on wrap */
    dev->rxq_ptr += 16;
    if (dev->rxq_ptr >= VLAN_BD_LEN(rxq_bd)) {
        dev->rxq_ptr = 0;
        vio_stq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF, rxq_bd ^ VLAN_BD_TOGGLE);
    }

    /* Raise the interrupt only if the guest enabled it via H_VIO_SIGNAL */
    if (sdev->signal_state & 1) {
        qemu_irq_pulse(spapr_vio_qirq(sdev));
    }

    return size;
}
275
/* Net-core client hooks for this NIC model */
static NetClientInfo net_spapr_vlan_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = spapr_vlan_can_receive,
    .receive = spapr_vlan_receive,
};
282
8836630f
TH
283static void spapr_vlan_flush_rx_queue(void *opaque)
284{
ce2918cb 285 SpaprVioVlan *dev = opaque;
8836630f
TH
286
287 qemu_flush_queued_packets(qemu_get_queue(dev->nic));
288}
289
831e8822
TH
290static void spapr_vlan_reset_rx_pool(RxBufPool *rxp)
291{
292 /*
293 * Use INT_MAX as bufsize so that unused buffers are moved to the end
294 * of the list during the qsort in spapr_vlan_add_rxbuf_to_pool() later.
295 */
296 rxp->bufsize = INT_MAX;
297 rxp->count = 0;
298 memset(rxp->bds, 0, sizeof(rxp->bds));
299}
300
/*
 * Device reset: drop the guest registration state, empty all rx pools
 * (pool mode only) and restore the permanent MAC address, which
 * H_CHANGE_LOGICAL_LAN_MAC may have overridden.
 */
static void spapr_vlan_reset(SpaprVioDevice *sdev)
{
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    int i;

    dev->buf_list = 0;
    dev->rx_bufs = 0;
    dev->isopen = 0;

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        for (i = 0; i < RX_MAX_POOLS; i++) {
            spapr_vlan_reset_rx_pool(dev->rx_pool[i]);
        }
    }

    /* Revert to the MAC captured at realize time */
    memcpy(&dev->nicconf.macaddr.a, &dev->perm_mac.a,
           sizeof(dev->nicconf.macaddr.a));
    qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
}
320
/*
 * Realize: finalize the MAC address, create the backend NIC and the
 * timer used to batch receive-queue flushes.
 */
static void spapr_vlan_realize(SpaprVioDevice *sdev, Error **errp)
{
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);

    qemu_macaddr_default_if_unset(&dev->nicconf.macaddr);

    /* Remember the original MAC so reset can restore it */
    memcpy(&dev->perm_mac.a, &dev->nicconf.macaddr.a, sizeof(dev->perm_mac.a));

    dev->nic = qemu_new_nic(&net_spapr_vlan_info, &dev->nicconf,
                            object_get_typename(OBJECT(sdev)), sdev->qdev.id, dev);
    qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);

    /* Microsecond timer; armed in h_add_logical_lan_buffer() */
    dev->rxp_timer = timer_new_us(QEMU_CLOCK_VIRTUAL, spapr_vlan_flush_rx_queue,
                                  dev);
}
336
dfe79cf2
GA
/*
 * Instance init: expose the bootindex property and, when the migration
 * compat flag allows it, allocate the receive buffer pools.
 */
static void spapr_vlan_instance_init(Object *obj)
{
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(obj);
    int i;

    device_add_bootindex_property(obj, &dev->nicconf.bootindex,
                                  "bootindex", "",
                                  DEVICE(dev), NULL);

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        for (i = 0; i < RX_MAX_POOLS; i++) {
            dev->rx_pool[i] = g_new(RxBufPool, 1);
            spapr_vlan_reset_rx_pool(dev->rx_pool[i]);
        }
    }
}
353
/* Instance finalize: release pools and timer created earlier. */
static void spapr_vlan_instance_finalize(Object *obj)
{
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(obj);
    int i;

    /* Pools exist only in pool mode (allocated in instance_init) */
    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        for (i = 0; i < RX_MAX_POOLS; i++) {
            g_free(dev->rx_pool[i]);
            dev->rx_pool[i] = NULL;
        }
    }

    /* The timer is created in realize, which may never have run */
    if (dev->rxp_timer) {
        timer_del(dev->rxp_timer);
        timer_free(dev->rxp_timer);
    }
}
371
/*
 * Board-level helper: create a spapr-vlan device on the given VIO bus,
 * configured from the -net/-nic information in @nd.
 */
void spapr_vlan_create(SpaprVioBus *bus, NICInfo *nd)
{
    DeviceState *dev;

    dev = qdev_create(&bus->bus, "spapr-vlan");

    /* Must happen between qdev_create() and qdev_init_nofail() */
    qdev_set_nic_properties(dev, nd);

    qdev_init_nofail(dev);
}
382
/*
 * Populate this device's node in the flattened device tree handed to the
 * guest. Returns 0 on success or a (negative) libfdt error code.
 */
static int spapr_vlan_devnode(SpaprVioDevice *dev, void *fdt, int node_off)
{
    SpaprVioVlan *vdev = VIO_SPAPR_VLAN_DEVICE(dev);
    uint8_t padded_mac[8] = {0, 0};
    int ret;

    /* Some old phyp versions give the mac address in an 8-byte
     * property. The kernel driver (before 3.10) has an insane workaround;
     * rather than doing the obvious thing and checking the property
     * length, it checks whether the first byte has 0b10 in the low
     * bits. If a correct 6-byte property has a different first byte
     * the kernel will get the wrong mac address, overrunning its
     * buffer in the process (read only, thank goodness).
     *
     * Here we return a 6-byte address unless that would break a pre-3.10
     * driver. In that case we return a padded 8-byte address to allow the old
     * workaround to succeed. */
    if ((vdev->nicconf.macaddr.a[0] & 0x3) == 0x2) {
        ret = fdt_setprop(fdt, node_off, "local-mac-address",
                          &vdev->nicconf.macaddr, ETH_ALEN);
    } else {
        memcpy(&padded_mac[2], &vdev->nicconf.macaddr, ETH_ALEN);
        ret = fdt_setprop(fdt, node_off, "local-mac-address",
                          padded_mac, sizeof(padded_mac));
    }
    if (ret < 0) {
        return ret;
    }

    /* We implement no MAC address filters (see h_multicast_ctrl) */
    ret = fdt_setprop_cell(fdt, node_off, "ibm,mac-address-filters", 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
419
/*
 * Validate a guest-supplied buffer descriptor: address and length must
 * honour @alignment and the whole range must be DMA-accessible in both
 * directions. Returns 0 if valid, -1 otherwise.
 */
static int check_bd(SpaprVioVlan *dev, vlan_bd_t bd,
                    target_ulong alignment)
{
    if ((VLAN_BD_ADDR(bd) % alignment)
        || (VLAN_BD_LEN(bd) % alignment)) {
        return -1;
    }

    if (!spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd),
                             VLAN_BD_LEN(bd), DMA_DIRECTION_FROM_DEVICE)
        || !spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd),
                                VLAN_BD_LEN(bd), DMA_DIRECTION_TO_DEVICE)) {
        return -1;
    }

    return 0;
}
437
/*
 * H_REGISTER_LOGICAL_LAN hypercall: the guest hands us the buffer-list
 * page, the receive queue descriptor and the filter-list page, and the
 * device transitions to the "open" state.
 */
static target_ulong h_register_logical_lan(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong buf_list = args[1];
    target_ulong rec_queue = args[2];
    target_ulong filter_list = args[3];
    SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    vlan_bd_t filter_list_bd;

    /* NOTE(review): VIO_SPAPR_VLAN_DEVICE() is applied to sdev before this
     * NULL check — confirm the cast macro tolerates a NULL sdev. */
    if (!dev) {
        return H_PARAMETER;
    }

    if (dev->isopen) {
        hcall_dprintf("H_REGISTER_LOGICAL_LAN called twice without "
                      "H_FREE_LOGICAL_LAN\n");
        return H_RESOURCE;
    }

    /* All three guest pages must be page-aligned and DMA-reachable */
    if (check_bd(dev, VLAN_VALID_BD(buf_list, SPAPR_TCE_PAGE_SIZE),
                 SPAPR_TCE_PAGE_SIZE) < 0) {
        hcall_dprintf("Bad buf_list 0x" TARGET_FMT_lx "\n", buf_list);
        return H_PARAMETER;
    }

    filter_list_bd = VLAN_VALID_BD(filter_list, SPAPR_TCE_PAGE_SIZE);
    if (check_bd(dev, filter_list_bd, SPAPR_TCE_PAGE_SIZE) < 0) {
        hcall_dprintf("Bad filter_list 0x" TARGET_FMT_lx "\n", filter_list);
        return H_PARAMETER;
    }

    if (!(rec_queue & VLAN_BD_VALID)
        || (check_bd(dev, rec_queue, VLAN_RQ_ALIGNMENT) < 0)) {
        hcall_dprintf("Bad receive queue\n");
        return H_PARAMETER;
    }

    dev->buf_list = buf_list;
    sdev->signal_state = 0;

    /* Start with the toggle bit clear; receive flips it on each wrap */
    rec_queue &= ~VLAN_BD_TOGGLE;

    /* Initialize the buffer list */
    vio_stq(sdev, buf_list, rec_queue);
    vio_stq(sdev, buf_list + 8, filter_list_bd);
    spapr_vio_dma_set(sdev, buf_list + VLAN_RX_BDS_OFF, 0,
                      SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF);
    /* Pointers start one slot before the BD area; they pre-increment */
    dev->add_buf_ptr = VLAN_RX_BDS_OFF - 8;
    dev->use_buf_ptr = VLAN_RX_BDS_OFF - 8;
    dev->rx_bufs = 0;
    dev->rxq_ptr = 0;

    /* Initialize the receive queue */
    spapr_vio_dma_set(sdev, VLAN_BD_ADDR(rec_queue), 0, VLAN_BD_LEN(rec_queue));

    dev->isopen = 1;
    /* Deliver anything that queued up while we were closed */
    qemu_flush_queued_packets(qemu_get_queue(dev->nic));

    return H_SUCCESS;
}
502
503
/*
 * H_FREE_LOGICAL_LAN hypercall: tear down the registration made by
 * H_REGISTER_LOGICAL_LAN (implemented as a full device reset).
 */
static target_ulong h_free_logical_lan(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);

    /* NOTE(review): as in h_register_logical_lan, the cast precedes the
     * NULL check; confirm the macro tolerates a NULL sdev. */
    if (!dev) {
        return H_PARAMETER;
    }

    if (!dev->isopen) {
        hcall_dprintf("H_FREE_LOGICAL_LAN called without "
                      "H_REGISTER_LOGICAL_LAN\n");
        return H_RESOURCE;
    }

    spapr_vlan_reset(sdev);
    return H_SUCCESS;
}
525
831e8822
TH
526/**
527 * Used for qsort, this function compares two RxBufPools by size.
528 */
529static int rx_pool_size_compare(const void *p1, const void *p2)
530{
531 const RxBufPool *pool1 = *(RxBufPool **)p1;
532 const RxBufPool *pool2 = *(RxBufPool **)p2;
533
534 if (pool1->bufsize < pool2->bufsize) {
535 return -1;
536 }
537 return pool1->bufsize > pool2->bufsize;
538}
539
540/**
541 * Search for a matching buffer pool with exact matching size,
542 * or return -1 if no matching pool has been found.
543 */
ce2918cb 544static int spapr_vlan_get_rx_pool_id(SpaprVioVlan *dev, int size)
831e8822
TH
545{
546 int pool;
547
548 for (pool = 0; pool < RX_MAX_POOLS; pool++) {
549 if (dev->rx_pool[pool]->bufsize == size) {
550 return pool;
551 }
552 }
553
554 return -1;
555}
556
/**
 * Enqueuing receive buffer by adding it to one of our receive buffer pools.
 * Returns 0 on success or H_RESOURCE when no pool can take the buffer.
 */
static target_long spapr_vlan_add_rxbuf_to_pool(SpaprVioVlan *dev,
                                                target_ulong buf)
{
    int size = VLAN_BD_LEN(buf);
    int pool;

    pool = spapr_vlan_get_rx_pool_id(dev, size);
    if (pool < 0) {
        /*
         * No matching pool found? Try to use a new one. If the guest used all
         * pools before, but changed the size of one pool in the meantime, we might
         * need to recycle that pool here (if it's empty already). Thus scan
         * all buffer pools now, starting with the last (likely empty) one.
         */
        for (pool = RX_MAX_POOLS - 1; pool >= 0 ; pool--) {
            if (dev->rx_pool[pool]->count == 0) {
                dev->rx_pool[pool]->bufsize = size;
                /*
                 * Sort pools by size so that spapr_vlan_receive()
                 * can later find the smallest buffer pool easily.
                 */
                qsort(dev->rx_pool, RX_MAX_POOLS, sizeof(dev->rx_pool[0]),
                      rx_pool_size_compare);
                /* Re-resolve the index: qsort may have moved the pool */
                pool = spapr_vlan_get_rx_pool_id(dev, size);
                trace_spapr_vlan_add_rxbuf_to_pool_create(pool,
                                                          VLAN_BD_LEN(buf));
                break;
            }
        }
    }
    /* Still no usable pool? Give up */
    if (pool < 0 || dev->rx_pool[pool]->count >= RX_POOL_MAX_BDS) {
        return H_RESOURCE;
    }

    trace_spapr_vlan_add_rxbuf_to_pool(pool, VLAN_BD_LEN(buf),
                                       dev->rx_pool[pool]->count);

    dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count++] = buf;

    return 0;
}
602
/**
 * This is the old way of enqueuing receive buffers: Add it to the rx queue
 * page that has been supplied by the guest (which is quite limited in size).
 * Returns 0 on success or H_RESOURCE when the page is full.
 */
static target_long spapr_vlan_add_rxbuf_to_page(SpaprVioVlan *dev,
                                                target_ulong buf)
{
    vlan_bd_t bd;

    if (dev->rx_bufs >= VLAN_MAX_BUFS) {
        return H_RESOURCE;
    }

    /* Find the next free (non-VALID) slot, wrapping around the BD area.
     * The rx_bufs check above guarantees at least one slot is free. */
    do {
        dev->add_buf_ptr += 8;
        if (dev->add_buf_ptr >= VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF) {
            dev->add_buf_ptr = VLAN_RX_BDS_OFF;
        }

        bd = vio_ldq(&dev->sdev, dev->buf_list + dev->add_buf_ptr);
    } while (bd & VLAN_BD_VALID);

    vio_stq(&dev->sdev, dev->buf_list + dev->add_buf_ptr, buf);

    trace_spapr_vlan_add_rxbuf_to_page(dev->add_buf_ptr, dev->rx_bufs, buf);

    return 0;
}
631
/*
 * H_ADD_LOGICAL_LAN_BUFFER hypercall: the guest donates one receive
 * buffer descriptor; we stash it (pool or legacy page mode) and arm a
 * short timer before flushing queued packets.
 */
static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong buf = args[1];
    SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    target_long ret;

    trace_spapr_vlan_h_add_logical_lan_buffer(reg, buf);

    if (!sdev) {
        hcall_dprintf("Bad device\n");
        return H_PARAMETER;
    }

    /* Descriptor must be 4-byte aligned and hold at least 16 bytes */
    if ((check_bd(dev, buf, 4) < 0)
        || (VLAN_BD_LEN(buf) < 16)) {
        hcall_dprintf("Bad buffer enqueued\n");
        return H_PARAMETER;
    }

    if (!dev->isopen) {
        return H_RESOURCE;
    }

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        ret = spapr_vlan_add_rxbuf_to_pool(dev, buf);
    } else {
        ret = spapr_vlan_add_rxbuf_to_page(dev, buf);
    }
    if (ret) {
        return ret;
    }

    dev->rx_bufs++;

    /*
     * Give guest some more time to add additional RX buffers before we
     * flush the receive queue, so that e.g. fragmented IP packets can
     * be passed to the guest in one go later (instead of passing single
     * fragments if there is only one receive buffer available).
     */
    timer_mod(dev->rxp_timer, qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + 500);

    return H_SUCCESS;
}
681
28e02042 682static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
ce2918cb 683 SpaprMachineState *spapr,
8d90ad90
DG
684 target_ulong opcode, target_ulong *args)
685{
686 target_ulong reg = args[0];
687 target_ulong *bufs = args + 1;
688 target_ulong continue_token = args[7];
ce2918cb
DG
689 SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
690 SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
8d90ad90
DG
691 unsigned total_len;
692 uint8_t *lbuf, *p;
693 int i, nbufs;
694 int ret;
695
e8bb33de 696 trace_spapr_vlan_h_send_logical_lan(reg, continue_token);
8d90ad90
DG
697
698 if (!sdev) {
699 return H_PARAMETER;
700 }
701
e8bb33de 702 trace_spapr_vlan_h_send_logical_lan_rxbufs(dev->rx_bufs);
8d90ad90
DG
703
704 if (!dev->isopen) {
705 return H_DROPPED;
706 }
707
708 if (continue_token) {
709 return H_HARDWARE; /* FIXME actually handle this */
710 }
711
712 total_len = 0;
713 for (i = 0; i < 6; i++) {
e8bb33de 714 trace_spapr_vlan_h_send_logical_lan_buf_desc(bufs[i]);
8d90ad90
DG
715 if (!(bufs[i] & VLAN_BD_VALID)) {
716 break;
717 }
718 total_len += VLAN_BD_LEN(bufs[i]);
719 }
720
721 nbufs = i;
e8bb33de 722 trace_spapr_vlan_h_send_logical_lan_total(nbufs, total_len);
8d90ad90
DG
723
724 if (total_len == 0) {
725 return H_SUCCESS;
726 }
727
728 if (total_len > MAX_PACKET_SIZE) {
729 /* Don't let the guest force too large an allocation */
730 return H_RESOURCE;
731 }
732
733 lbuf = alloca(total_len);
734 p = lbuf;
735 for (i = 0; i < nbufs; i++) {
ad0ebb91 736 ret = spapr_vio_dma_read(sdev, VLAN_BD_ADDR(bufs[i]),
8d90ad90
DG
737 p, VLAN_BD_LEN(bufs[i]));
738 if (ret < 0) {
739 return ret;
740 }
741
742 p += VLAN_BD_LEN(bufs[i]);
743 }
744
b356f76d 745 qemu_send_packet(qemu_get_queue(dev->nic), lbuf, total_len);
8d90ad90
DG
746
747 return H_SUCCESS;
748}
749
ce2918cb 750static target_ulong h_multicast_ctrl(PowerPCCPU *cpu, SpaprMachineState *spapr,
8d90ad90
DG
751 target_ulong opcode, target_ulong *args)
752{
753 target_ulong reg = args[0];
ce2918cb 754 SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
8d90ad90
DG
755
756 if (!dev) {
757 return H_PARAMETER;
758 }
759
760 return H_SUCCESS;
761}
762
32f5f50d 763static target_ulong h_change_logical_lan_mac(PowerPCCPU *cpu,
ce2918cb 764 SpaprMachineState *spapr,
32f5f50d
LV
765 target_ulong opcode,
766 target_ulong *args)
767{
768 target_ulong reg = args[0];
769 target_ulong macaddr = args[1];
ce2918cb
DG
770 SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
771 SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
32f5f50d
LV
772 int i;
773
774 for (i = 0; i < ETH_ALEN; i++) {
775 dev->nicconf.macaddr.a[ETH_ALEN - i - 1] = macaddr & 0xff;
776 macaddr >>= 8;
777 }
778
779 qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
780
781 return H_SUCCESS;
782}
783
/* qdev properties; "use-rx-buffer-pools" gates the migration compat flag */
static Property spapr_vlan_properties[] = {
    DEFINE_SPAPR_PROPERTIES(SpaprVioVlan, sdev),
    DEFINE_NIC_PROPERTIES(SpaprVioVlan, nicconf),
    DEFINE_PROP_BIT("use-rx-buffer-pools", SpaprVioVlan,
                    compat_flags, SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
791
831e8822
TH
792static bool spapr_vlan_rx_buffer_pools_needed(void *opaque)
793{
ce2918cb 794 SpaprVioVlan *dev = opaque;
831e8822
TH
795
796 return (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) != 0;
797}
798
/* Migration description of one RxBufPool */
static const VMStateDescription vmstate_rx_buffer_pool = {
    .name = "spapr_llan/rx_buffer_pool",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_vlan_rx_buffer_pools_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(bufsize, RxBufPool),
        VMSTATE_INT32(count, RxBufPool),
        VMSTATE_UINT64_ARRAY(bds, RxBufPool, RX_POOL_MAX_BDS),
        VMSTATE_END_OF_LIST()
    }
};
811
/* Migration subsection carrying all RX_MAX_POOLS pools (pool mode only) */
static const VMStateDescription vmstate_rx_pools = {
    .name = "spapr_llan/rx_pools",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_vlan_rx_buffer_pools_needed,
    .fields = (VMStateField[]) {
        VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(rx_pool, SpaprVioVlan,
                                           RX_MAX_POOLS, 1,
                                           vmstate_rx_buffer_pool, RxBufPool),
        VMSTATE_END_OF_LIST()
    }
};
824
/* Top-level migration description for the device */
static const VMStateDescription vmstate_spapr_llan = {
    .name = "spapr_llan",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SPAPR_VIO(sdev, SpaprVioVlan),
        /* LLAN state */
        VMSTATE_BOOL(isopen, SpaprVioVlan),
        VMSTATE_UINT64(buf_list, SpaprVioVlan),
        VMSTATE_UINT32(add_buf_ptr, SpaprVioVlan),
        VMSTATE_UINT32(use_buf_ptr, SpaprVioVlan),
        VMSTATE_UINT32(rx_bufs, SpaprVioVlan),
        VMSTATE_UINT64(rxq_ptr, SpaprVioVlan),

        VMSTATE_END_OF_LIST()
    },
    /* Optional rx-pool state; skipped when the compat flag is off */
    .subsections = (const VMStateDescription * []) {
        &vmstate_rx_pools,
        NULL
    }
};
846
3954d33a
AL
847static void spapr_vlan_class_init(ObjectClass *klass, void *data)
848{
39bffca2 849 DeviceClass *dc = DEVICE_CLASS(klass);
ce2918cb 850 SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
3954d33a 851
28b07e73 852 k->realize = spapr_vlan_realize;
c17491b6 853 k->reset = spapr_vlan_reset;
3954d33a
AL
854 k->devnode = spapr_vlan_devnode;
855 k->dt_name = "l-lan";
856 k->dt_type = "network";
857 k->dt_compatible = "IBM,l-lan";
858 k->signal_mask = 0x1;
29fdedfe 859 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
39bffca2 860 dc->props = spapr_vlan_properties;
ad0ebb91 861 k->rtce_window_size = 0x10000000;
686fefe4 862 dc->vmsd = &vmstate_spapr_llan;
3954d33a
AL
863}
864
/* QOM type registration record for the spapr-vlan device */
static const TypeInfo spapr_vlan_info = {
    .name          = TYPE_VIO_SPAPR_VLAN_DEVICE,
    .parent        = TYPE_VIO_SPAPR_DEVICE,
    .instance_size = sizeof(SpaprVioVlan),
    .class_init    = spapr_vlan_class_init,
    .instance_init = spapr_vlan_instance_init,
    .instance_finalize = spapr_vlan_instance_finalize,
};
873
/* Register the logical-LAN hypercalls and the QOM type */
static void spapr_vlan_register_types(void)
{
    spapr_register_hypercall(H_REGISTER_LOGICAL_LAN, h_register_logical_lan);
    spapr_register_hypercall(H_FREE_LOGICAL_LAN, h_free_logical_lan);
    spapr_register_hypercall(H_SEND_LOGICAL_LAN, h_send_logical_lan);
    spapr_register_hypercall(H_ADD_LOGICAL_LAN_BUFFER,
                             h_add_logical_lan_buffer);
    spapr_register_hypercall(H_MULTICAST_CTRL, h_multicast_ctrl);
    spapr_register_hypercall(H_CHANGE_LOGICAL_LAN_MAC,
                             h_change_logical_lan_mac);
    type_register_static(&spapr_vlan_info);
}

type_init(spapr_vlan_register_types)