diff --git a/net/vhost-user.c b/net/vhost-user.c
index b753b3d80066227ba03812a9adfef1b727169c08..e7e63408a1fbb280586f20a395e69bfeb89b59d0 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
 
 typedef struct VhostUserState {
     NetClientState nc;
-    CharDriverState *chr;
+    CharBackend chr; /* only queue index 0 */
     VHostNetState *vhost_net;
+    guint watch;
+    uint64_t acked_features;
+    bool started;
 } VhostUserState;
 
-typedef struct VhostUserChardevProps {
-    bool is_socket;
-    bool is_unix;
-    bool is_server;
-} VhostUserChardevProps;
-
 VHostNetState *vhost_user_get_vhost_net(NetClientState *nc)
 {
     VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
-    assert(nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER);
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);
     return s->vhost_net;
 }
 
-static int vhost_user_running(VhostUserState *s)
+uint64_t vhost_user_get_acked_features(NetClientState *nc)
 {
-    return (s->vhost_net) ? 1 : 0;
+    VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);
+    return s->acked_features;
 }
 
 static void vhost_user_stop(int queues, NetClientState *ncs[])
@@ -48,23 +47,25 @@ static void vhost_user_stop(int queues, NetClientState *ncs[])
     int i;
 
     for (i = 0; i < queues; i++) {
-        assert (ncs[i]->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER);
+        assert(ncs[i]->info->type == NET_CLIENT_DRIVER_VHOST_USER);
 
         s = DO_UPCAST(VhostUserState, nc, ncs[i]);
-        if (!vhost_user_running(s)) {
-            continue;
-        }
 
         if (s->vhost_net) {
+            /* save acked features */
+            uint64_t features = vhost_net_get_acked_features(s->vhost_net);
+            if (features) {
+                s->acked_features = features;
+            }
             vhost_net_cleanup(s->vhost_net);
-            s->vhost_net = NULL;
         }
     }
 }
 
-static int vhost_user_start(int queues, NetClientState *ncs[])
+static int vhost_user_start(int queues, NetClientState *ncs[], CharBackend *be)
 {
     VhostNetOptions options;
+    struct vhost_net *net = NULL;
     VhostUserState *s;
     int max_queues;
     int i;
@@ -72,35 +73,42 @@ static int vhost_user_start(int queues, NetClientState *ncs[])
     options.backend_type = VHOST_BACKEND_TYPE_USER;
 
     for (i = 0; i < queues; i++) {
-        assert (ncs[i]->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER);
+        assert(ncs[i]->info->type == NET_CLIENT_DRIVER_VHOST_USER);
 
         s = DO_UPCAST(VhostUserState, nc, ncs[i]);
-        if (vhost_user_running(s)) {
-            continue;
-        }
 
         options.net_backend = ncs[i];
-        options.opaque      = s->chr;
-        s->vhost_net = vhost_net_init(&options);
-        if (!s->vhost_net) {
+        options.opaque      = be;
+        options.busyloop_timeout = 0;
+        net = vhost_net_init(&options);
+        if (!net) {
             error_report("failed to init vhost_net for queue %d", i);
             goto err;
         }
 
         if (i == 0) {
-            max_queues = vhost_net_get_max_queues(s->vhost_net);
+            max_queues = vhost_net_get_max_queues(net);
             if (queues > max_queues) {
                 error_report("you are asking more queues than supported: %d",
                              max_queues);
                 goto err;
             }
         }
+
+        if (s->vhost_net) {
+            vhost_net_cleanup(s->vhost_net);
+            g_free(s->vhost_net);
+        }
+        s->vhost_net = net;
     }
 
     return 0;
 
 err:
-    vhost_user_stop(i + 1, ncs);
+    if (net) {
+        vhost_net_cleanup(net);
+    }
+    vhost_user_stop(i, ncs);
     return -1;
 }
 
@@ -139,28 +147,35 @@ static void vhost_user_cleanup(NetClientState *nc)
 
     if (s->vhost_net) {
         vhost_net_cleanup(s->vhost_net);
+        g_free(s->vhost_net);
         s->vhost_net = NULL;
     }
+    if (nc->queue_index == 0) {
+        Chardev *chr = qemu_chr_fe_get_driver(&s->chr);
+
+        qemu_chr_fe_deinit(&s->chr);
+        qemu_chr_delete(chr);
+    }
 
     qemu_purge_queued_packets(nc);
 }
 
 static bool vhost_user_has_vnet_hdr(NetClientState *nc)
 {
-    assert(nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER);
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);
 
     return true;
 }
 
 static bool vhost_user_has_ufo(NetClientState *nc)
 {
-    assert(nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER);
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);
 
     return true;
 }
 
 static NetClientInfo net_vhost_user_info = {
-        .type = NET_CLIENT_OPTIONS_KIND_VHOST_USER,
+        .type = NET_CLIENT_DRIVER_VHOST_USER,
         .size = sizeof(VhostUserState),
         .receive = vhost_user_receive,
         .cleanup = vhost_user_cleanup,
@@ -168,29 +183,88 @@ static NetClientInfo net_vhost_user_info = {
         .has_ufo = vhost_user_has_ufo,
 };
 
+static gboolean net_vhost_user_watch(GIOChannel *chan, GIOCondition cond,
+                                           void *opaque)
+{
+    VhostUserState *s = opaque;
+
+    qemu_chr_fe_disconnect(&s->chr);
+
+    return TRUE;
+}
+
+static void net_vhost_user_event(void *opaque, int event);
+
+static void chr_closed_bh(void *opaque)
+{
+    const char *name = opaque;
+    NetClientState *ncs[MAX_QUEUE_NUM];
+    VhostUserState *s;
+    Error *err = NULL;
+    int queues;
+
+    queues = qemu_find_net_clients_except(name, ncs,
+                                          NET_CLIENT_DRIVER_NIC,
+                                          MAX_QUEUE_NUM);
+    assert(queues < MAX_QUEUE_NUM);
+
+    s = DO_UPCAST(VhostUserState, nc, ncs[0]);
+
+    qmp_set_link(name, false, &err);
+    vhost_user_stop(queues, ncs);
+
+    qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, net_vhost_user_event,
+                             opaque, NULL, true);
+
+    if (err) {
+        error_report_err(err);
+    }
+}
+
 static void net_vhost_user_event(void *opaque, int event)
 {
     const char *name = opaque;
     NetClientState *ncs[MAX_QUEUE_NUM];
     VhostUserState *s;
+    Chardev *chr;
     Error *err = NULL;
     int queues;
 
     queues = qemu_find_net_clients_except(name, ncs,
-                                          NET_CLIENT_OPTIONS_KIND_NIC,
+                                          NET_CLIENT_DRIVER_NIC,
                                           MAX_QUEUE_NUM);
+    assert(queues < MAX_QUEUE_NUM);
+
     s = DO_UPCAST(VhostUserState, nc, ncs[0]);
-    trace_vhost_user_event(s->chr->label, event);
+    chr = qemu_chr_fe_get_driver(&s->chr);
+    trace_vhost_user_event(chr->label, event);
     switch (event) {
     case CHR_EVENT_OPENED:
-        if (vhost_user_start(queues, ncs) < 0) {
-            exit(1);
+        if (vhost_user_start(queues, ncs, &s->chr) < 0) {
+            qemu_chr_fe_disconnect(&s->chr);
+            return;
         }
+        s->watch = qemu_chr_fe_add_watch(&s->chr, G_IO_HUP,
+                                         net_vhost_user_watch, s);
         qmp_set_link(name, true, &err);
+        s->started = true;
         break;
     case CHR_EVENT_CLOSED:
-        qmp_set_link(name, false, &err);
-        vhost_user_stop(queues, ncs);
+        /* a close event may happen during a read/write, but vhost
+         * code assumes the vhost_dev remains setup, so delay the
+         * stop & clear to idle.
+         * FIXME: better handle failure in vhost code, remove bh
+         */
+        if (s->watch) {
+            AioContext *ctx = qemu_get_current_aio_context();
+
+            g_source_remove(s->watch);
+            s->watch = 0;
+            qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, NULL,
+                                     NULL, NULL, false);
+
+            aio_bh_schedule_oneshot(ctx, chr_closed_bh, opaque);
+        }
         break;
     }
 
@@ -200,76 +274,69 @@ static void net_vhost_user_event(void *opaque, int event)
 }
 
 static int net_vhost_user_init(NetClientState *peer, const char *device,
-                               const char *name, CharDriverState *chr,
+                               const char *name, Chardev *chr,
                                int queues)
 {
-    NetClientState *nc;
+    Error *err = NULL;
+    NetClientState *nc, *nc0 = NULL;
     VhostUserState *s;
     int i;
 
+    assert(name);
+    assert(queues > 0);
+
     for (i = 0; i < queues; i++) {
         nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
-
         snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
                  i, chr->label);
-
         nc->queue_index = i;
+        if (!nc0) {
+            nc0 = nc;
+            s = DO_UPCAST(VhostUserState, nc, nc);
+            if (!qemu_chr_fe_init(&s->chr, chr, &err)) {
+                error_report_err(err);
+                return -1;
+            }
+        }
 
-        s = DO_UPCAST(VhostUserState, nc, nc);
-        s->chr = chr;
     }
 
-    qemu_chr_add_handlers(chr, NULL, NULL, net_vhost_user_event, (void*)name);
+    s = DO_UPCAST(VhostUserState, nc, nc0);
+    do {
+        if (qemu_chr_fe_wait_connected(&s->chr, &err) < 0) {
+            error_report_err(err);
+            return -1;
+        }
+        qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
+                                 net_vhost_user_event, nc0->name, NULL, true);
+    } while (!s->started);
 
-    return 0;
-}
+    assert(s->vhost_net);
 
-static int net_vhost_chardev_opts(void *opaque,
-                                  const char *name, const char *value,
-                                  Error **errp)
-{
-    VhostUserChardevProps *props = opaque;
-
-    if (strcmp(name, "backend") == 0 && strcmp(value, "socket") == 0) {
-        props->is_socket = true;
-    } else if (strcmp(name, "path") == 0) {
-        props->is_unix = true;
-    } else if (strcmp(name, "server") == 0) {
-        props->is_server = true;
-    } else {
-        error_setg(errp,
-                   "vhost-user does not support a chardev with option %s=%s",
-                   name, value);
-        return -1;
-    }
     return 0;
 }
 
-static CharDriverState *net_vhost_parse_chardev(
+static Chardev *net_vhost_claim_chardev(
     const NetdevVhostUserOptions *opts, Error **errp)
 {
-    CharDriverState *chr = qemu_chr_find(opts->chardev);
-    VhostUserChardevProps props;
+    Chardev *chr = qemu_chr_find(opts->chardev);
 
     if (chr == NULL) {
         error_setg(errp, "chardev \"%s\" not found", opts->chardev);
         return NULL;
     }
 
-    /* inspect chardev opts */
-    memset(&props, 0, sizeof(props));
-    if (qemu_opt_foreach(chr->opts, net_vhost_chardev_opts, &props, errp)) {
+    if (!qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_RECONNECTABLE)) {
+        error_setg(errp, "chardev \"%s\" is not reconnectable",
+                   opts->chardev);
         return NULL;
     }
-
-    if (!props.is_socket || !props.is_unix) {
-        error_setg(errp, "chardev \"%s\" is not a unix socket",
+    if (!qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_FD_PASS)) {
+        error_setg(errp, "chardev \"%s\" does not support FD passing",
                    opts->chardev);
         return NULL;
     }
 
-    qemu_chr_fe_claim_no_fail(chr);
-
     return chr;
 }
 
@@ -277,7 +344,6 @@ static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp)
 {
     const char *name = opaque;
     const char *driver, *netdev;
-    const char virtio_name[] = "virtio-net-";
 
     driver = qemu_opt_get(opts, "driver");
     netdev = qemu_opt_get(opts, "netdev");
@@ -287,7 +353,7 @@ static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp)
     }
 
     if (strcmp(netdev, name) == 0 &&
-        strncmp(driver, virtio_name, strlen(virtio_name)) != 0) {
+        !g_str_has_prefix(driver, "virtio-net-")) {
         error_setg(errp, "vhost-user requires frontend driver virtio-net-*");
         return -1;
     }
@@ -295,17 +361,17 @@ static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp)
     return 0;
 }
 
-int net_init_vhost_user(const NetClientOptions *opts, const char *name,
+int net_init_vhost_user(const Netdev *netdev, const char *name,
                         NetClientState *peer, Error **errp)
 {
     int queues;
     const NetdevVhostUserOptions *vhost_user_opts;
-    CharDriverState *chr;
+    Chardev *chr;
 
-    assert(opts->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER);
-    vhost_user_opts = opts->u.vhost_user;
+    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_USER);
+    vhost_user_opts = &netdev->u.vhost_user;
 
-    chr = net_vhost_parse_chardev(vhost_user_opts, errp);
+    chr = net_vhost_claim_chardev(vhost_user_opts, errp);
     if (!chr) {
         return -1;
     }
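
For context (not part of the diff): net_vhost_claim_chardev() requires a chardev that advertises both the reconnectable and FD-passing features, which in practice means a UNIX-domain socket chardev, and the vhost-user backend additionally needs guest memory to come from a shared memory backend so it can map it. A minimal sketch of an invocation that satisfies these checks is shown below; the socket path, object IDs, and memory size are illustrative, and it assumes a vhost-user backend (for example DPDK's vhost library) owns the other end of the socket:

    qemu-system-x86_64 -m 1024 \
        -object memory-backend-file,id=mem,size=1024M,mem-path=/dev/hugepages,share=on \
        -numa node,memdev=mem \
        -chardev socket,id=chr0,path=/tmp/vhost-user.sock \
        -netdev type=vhost-user,id=net0,chardev=chr0 \
        -device virtio-net-pci,netdev=net0

With this setup, net_init_vhost_user() claims chr0, net_vhost_user_init() waits for the backend connection via qemu_chr_fe_wait_connected(), and the CHR_EVENT_OPENED/CHR_EVENT_CLOSED handling above drives link state and reconnection.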