};
struct napi_struct napi;
bool napi_enabled;
+ bool napi_frags_enabled;
struct mutex napi_mutex; /* Protects access to the above napi */
struct list_head next;
struct tun_struct *detached;
}
static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
- bool napi_en)
+ bool napi_en, bool napi_frags)
{
tfile->napi_enabled = napi_en;
+ tfile->napi_frags_enabled = napi_en && napi_frags;
if (napi_en) {
- netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
+ NAPI_POLL_WEIGHT);
napi_enable(&tfile->napi);
}
}
netif_napi_del(&tfile->napi);
}
-static bool tun_napi_frags_enabled(const struct tun_struct *tun)
+static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
- return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
+ return tfile->napi_frags_enabled;
}
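The accessor above now reads a per-queue snapshot rather than re-checking tun->flags, so the datapath no longer races with runtime flag updates. A minimal sketch of the snapshot-at-attach pattern, with hypothetical names (queue_ctx, queue_attach); only IFF_NAPI_FRAGS comes from the real driver:

	/* Cache mutable device flags per queue at attach time; the
	 * datapath then reads only the stable per-queue copy. */
	struct queue_ctx {
		bool napi_frags_enabled;	/* snapshot of IFF_NAPI_FRAGS */
	};

	static void queue_attach(struct queue_ctx *q, unsigned int dev_flags,
				 bool napi_en)
	{
		q->napi_frags_enabled = napi_en && (dev_flags & IFF_NAPI_FRAGS);
	}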
#ifdef CONFIG_TUN_VNET_CROSS_LE
}
static int tun_attach(struct tun_struct *tun, struct file *file,
- bool skip_filter, bool napi)
+ bool skip_filter, bool napi, bool napi_frags,
+ bool publish_tun)
{
struct tun_file *tfile = file->private_data;
struct net_device *dev = tun->dev;
tfile->queue_index = tun->numqueues;
tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
- rcu_assign_pointer(tfile->tun, tun);
- rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
- tun->numqueues++;
-
if (tfile->detached) {
tun_enable_queue(tfile);
} else {
sock_hold(&tfile->sk);
- tun_napi_init(tun, tfile, napi);
+ tun_napi_init(tun, tfile, napi, napi_frags);
}
- tun_set_real_num_queues(tun);
-
/* device is allowed to go away first, so no need to hold extra
* refcnt.
*/
+ /* Publish tfile->tun and tun->tfiles only after we've fully
+ * initialized tfile; otherwise we risk using a half-initialized
+ * object.
+ */
+ if (publish_tun)
+ rcu_assign_pointer(tfile->tun, tun);
+ rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+ tun->numqueues++;
+ tun_set_real_num_queues(tun);
out:
return err;
}
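The reordering in tun_attach() follows the usual RCU publish-after-init discipline. A generic sketch of the pattern (struct item, global_item and publish_item are illustrative, not driver code):

	/* Complete every store to the object, then make it reachable.
	 * rcu_assign_pointer() implies a release barrier, so readers
	 * that observe the pointer also observe the initialized fields.
	 */
	struct item {
		int a, b;
	};

	static struct item __rcu *global_item;

	static void publish_item(struct item *it)
	{
		it->a = 1;			/* initialize fully ... */
		it->b = 2;
		rcu_assign_pointer(global_item, it);	/* ... then publish */
	}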
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
- struct tun_struct *tun = netdev_priv(dev);
- int i;
-
netif_tx_start_all_queues(dev);
- for (i = 0; i < tun->numqueues; i++) {
- struct tun_file *tfile;
-
- tfile = rtnl_dereference(tun->tfiles[i]);
- tfile->socket.sk->sk_write_space(tfile->socket.sk);
- }
-
return 0;
}
struct tun_file *tfile;
int i;
- if (tun_napi_frags_enabled(tun))
- return;
-
rcu_read_lock();
for (i = 0; i < tun->numqueues; i++) {
tfile = rcu_dereference(tun->tfiles[i]);
- if (tfile->napi_enabled)
+ if (!tun_napi_frags_enabled(tfile) &&
+ tfile->napi_enabled)
napi_schedule(&tfile->napi);
}
rcu_read_unlock();
dev->max_mtu = MAX_MTU - dev->hard_header_len;
}
+static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
+{
+ struct sock *sk = tfile->socket.sk;
+
+ return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
+}
+
/* Character device part */
/* Poll */
if (!skb_array_empty(&tfile->tx_array))
mask |= POLLIN | POLLRDNORM;
- if (tun->dev->flags & IFF_UP &&
- (sock_writeable(sk) ||
- (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
- sock_writeable(sk))))
+ /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
+ * guarantee EPOLLOUT to be raised by either here or
+ * tun_sock_write_space(). Then the process could get a
+ * notification after it writes to a down device and meets -EIO.
+ */
+ if (tun_sock_writeable(tun, tfile) ||
+ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+ tun_sock_writeable(tun, tfile)))
mask |= POLLOUT | POLLWRNORM;
if (tun->dev->reg_state != NETREG_REGISTERED)
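The rewritten poll check is the standard arm-then-recheck handshake against tun_sock_write_space(). Roughly, as a hypothetical helper (poll_writable is not in the driver):

	static bool poll_writable(struct tun_struct *tun, struct tun_file *tfile)
	{
		struct sock *sk = tfile->socket.sk;

		if (tun_sock_writeable(tun, tfile))
			return true;
		if (test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
			return false;	/* already armed; writer will notify */
		/* Recheck: space may have been freed between the first
		 * check and setting the flag, and the writer clears the
		 * flag before signalling, so EPOLLOUT cannot be lost. */
		return tun_sock_writeable(tun, tfile);
	}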
if (!rx_batched || (!more && skb_queue_empty(queue))) {
local_bh_disable();
+ skb_record_rx_queue(skb, tfile->queue_index);
netif_receive_skb(skb);
local_bh_enable();
return;
struct sk_buff *nskb;
local_bh_disable();
- while ((nskb = __skb_dequeue(&process_queue)))
+ while ((nskb = __skb_dequeue(&process_queue))) {
+ skb_record_rx_queue(nskb, tfile->queue_index);
netif_receive_skb(nskb);
+ }
+ skb_record_rx_queue(skb, tfile->queue_index);
netif_receive_skb(skb);
local_bh_enable();
}
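Each path into netif_receive_skb() now stamps the originating queue first. Illustrative shape (note skb_record_rx_queue() stores the index with a +1 bias that skb_get_rx_queue() undoes):

	skb_record_rx_queue(skb, tfile->queue_index);
	netif_receive_skb(skb);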
skb_reserve(skb, pad - delta);
skb_put(skb, len + delta);
+ skb_set_owner_w(skb, tfile->socket.sk);
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
int err;
u32 rxhash;
int skb_xdp = 1;
- bool frags = tun_napi_frags_enabled(tun);
-
- if (!(tun->dev->flags & IFF_UP))
- return -EIO;
+ bool frags = tun_napi_frags_enabled(tfile);
if (!(tun->flags & IFF_NO_PI)) {
if (len < sizeof(pi))
err = skb_copy_datagram_from_iter(skb, 0, from, len);
if (err) {
+ err = -EFAULT;
+drop:
this_cpu_inc(tun->pcpu_stats->rx_dropped);
kfree_skb(skb);
if (frags) {
mutex_unlock(&tfile->napi_mutex);
}
- return -EFAULT;
+ return err;
}
}
if (ret != XDP_PASS) {
rcu_read_unlock();
local_bh_enable();
+ if (frags) {
+ tfile->napi.skb = NULL;
+ mutex_unlock(&tfile->napi_mutex);
+ }
return total_len;
}
}
rxhash = __skb_get_hash_symmetric(skb);
+ rcu_read_lock();
+ if (unlikely(!(tun->dev->flags & IFF_UP))) {
+ err = -EIO;
+ rcu_read_unlock();
+ goto drop;
+ }
+
if (frags) {
/* Exercise flow dissector code path. */
u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
this_cpu_inc(tun->pcpu_stats->rx_dropped);
napi_free_frags(&tfile->napi);
+ rcu_read_unlock();
mutex_unlock(&tfile->napi_mutex);
WARN_ON(1);
return -ENOMEM;
} else {
netif_rx_ni(skb);
}
+ rcu_read_unlock();
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
}
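The new read-side section makes the IFF_UP test and the packet injection atomic with respect to device teardown, which synchronizes RCU before freeing. Skeleton of the guard (dev and skb are placeholders):

	rcu_read_lock();
	if (unlikely(!(dev->flags & IFF_UP))) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EIO;
	}
	netif_rx_ni(skb);	/* or napi_gro_frags()/napi_schedule() above */
	rcu_read_unlock();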
add_wait_queue(&tfile->wq.wait, &wait);
- current->state = TASK_INTERRUPTIBLE;
while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
skb = skb_array_consume(&tfile->tx_array);
if (skb)
break;
schedule();
}
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(&tfile->wq.wait, &wait);
out:
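The ring-receive change converts an open-coded sleep into the canonical lost-wakeup-safe wait loop: the task state is set before each condition check, so a wakeup landing between the check and schedule() resets the state and schedule() returns immediately. The general shape (wq and condition are placeholders):

	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition)
			break;
		if (signal_pending(current))
			break;		/* caller maps this to -ERESTARTSYS */
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&wq, &wait);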
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- return -EINVAL;
+ NL_SET_ERR_MSG(extack,
+ "tun/tap creation via rtnetlink is not supported.");
+ return -EOPNOTSUPP;
}
static struct rtnl_link_ops tun_link_ops __read_mostly = {
return err;
err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
- ifr->ifr_flags & IFF_NAPI);
+ ifr->ifr_flags & IFF_NAPI,
+ ifr->ifr_flags & IFF_NAPI_FRAGS, true);
if (err < 0)
return err;
NETIF_F_HW_VLAN_STAG_TX);
INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
+ err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
+ ifr->ifr_flags & IFF_NAPI_FRAGS, false);
if (err < 0)
goto err_free_flow;
err = register_netdevice(tun->dev);
if (err < 0)
goto err_detach;
+ /* free_netdev() won't check refcnt; to avoid a race
+ * with dev_put() we need to publish tun after registration.
+ */
+ rcu_assign_pointer(tfile->tun, tun);
}
netif_carrier_on(tun->dev);
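Publishing tfile->tun only after register_netdevice() succeeds closes a refcount race: on failure, free_netdev() frees the device without checking refcnt, and an already-published pointer would let a racing fd close dev_put() the dying device. Ordering sketch (error path condensed):

	err = register_netdevice(tun->dev);
	if (err < 0)
		goto err_detach;	/* tfile->tun still NULL: racing
					 * close cannot take a reference */
	rcu_assign_pointer(tfile->tun, tun);	/* publish on success only */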
ret = security_tun_dev_attach_queue(tun->security);
if (ret < 0)
goto unlock;
- ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
+ ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
+ tun->flags & IFF_NAPI_FRAGS, true);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct tun_struct *tun = netdev_priv(dev);
+ int i;
if (dev->rtnl_link_ops != &tun_link_ops)
return NOTIFY_DONE;
if (tun_queue_resize(tun))
return NOTIFY_BAD;
break;
+ case NETDEV_UP:
+ for (i = 0; i < tun->numqueues; i++) {
+ struct tun_file *tfile;
+
+ tfile = rtnl_dereference(tun->tfiles[i]);
+ tfile->socket.sk->sk_write_space(tfile->socket.sk);
+ }
+ break;
default:
break;
}