From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Brian Behlendorf <behlendorf1@llnl.gov>
Date: Tue, 5 Sep 2017 13:41:32 -0700
Subject: [PATCH] Trim new line from zfs_vdev_scheduler

Add a helper function to trim the trailing newline. While we're
here, use this new hook to immediately apply the new scheduler.

Reviewed-by: Giuseppe Di Natale <dinatale2@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #3356
Closes #6573

Signed-off-by: Stoiko Ivanov <s.ivanov@proxmox.com>
---
 module/zfs/vdev_disk.c | 71 +++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 53 insertions(+), 18 deletions(-)

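Note: the hook added below uses the stock Linux module_param_call() mechanism,
in which a custom setter intercepts every write to the parameter (at module
load or through /sys/module/zfs/parameters) before the value is stored. The
new <linux/mod_compat.h> include exists because the setter's second argument
changed type across kernel versions, which ZFS papers over with the
zfs_kernel_param_t typedef. A minimal standalone sketch of the same pattern,
assuming a recent kernel; the names newline_param and param_set_trimmed are
illustrative only, not from the ZFS sources:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

static char *newline_param = "noop";

/* Custom setter: runs on every write to the parameter. */
static int
param_set_trimmed(const char *val, const struct kernel_param *kp)
{
	char *p;

	if (val == NULL)
		return (-EINVAL);

	/* "echo noop > /sys/module/.../parameters/..." hands us "noop\n". */
	if ((p = strchr(val, '\n')) != NULL)
		*p = '\0';

	/*
	 * Immediate side effects (for ZFS: re-applying the elevator to
	 * the vdevs of all active pools) would run here, before the
	 * trimmed value is stored by the stock charp handler.
	 */
	return (param_set_charp(val, kp));
}

module_param_call(newline_param, param_set_trimmed, param_get_charp,
    &newline_param, 0644);
MODULE_PARM_DESC(newline_param, "example newline-trimmed parameter");
MODULE_LICENSE("GPL");
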
diff --git a/module/zfs/vdev_disk.c b/module/zfs/vdev_disk.c
index 5ae50a31..d6212835 100644
--- a/module/zfs/vdev_disk.c
+++ b/module/zfs/vdev_disk.c
@@ -27,13 +27,14 @@
  */
 
 #include <sys/zfs_context.h>
-#include <sys/spa.h>
+#include <sys/spa_impl.h>
 #include <sys/vdev_disk.h>
 #include <sys/vdev_impl.h>
 #include <sys/abd.h>
 #include <sys/fs/zfs.h>
 #include <sys/zio.h>
 #include <sys/sunldi.h>
+#include <linux/mod_compat.h>
 
 char *zfs_vdev_scheduler = VDEV_SCHEDULER;
 static void *zfs_vdev_holder = VDEV_HOLDER;
@@ -113,15 +114,23 @@ vdev_disk_error(zio_t *zio)
  * physical device. This yields the largest possible requests for
  * the device with the lowest total overhead.
  */
-static int
+static void
 vdev_elevator_switch(vdev_t *v, char *elevator)
 {
 	vdev_disk_t *vd = v->vdev_tsd;
-	struct block_device *bdev = vd->vd_bdev;
-	struct request_queue *q = bdev_get_queue(bdev);
-	char *device = bdev->bd_disk->disk_name;
+	struct request_queue *q;
+	char *device;
 	int error;
 
+	for (int c = 0; c < v->vdev_children; c++)
+		vdev_elevator_switch(v->vdev_child[c], elevator);
+
+	if (!v->vdev_ops->vdev_op_leaf || vd->vd_bdev == NULL)
+		return;
+
+	q = bdev_get_queue(vd->vd_bdev);
+	device = vd->vd_bdev->bd_disk->disk_name;
+
 	/*
 	 * Skip devices which are not whole disks (partitions).
 	 * Device-mapper devices are excepted since they may be whole
@@ -131,15 +140,15 @@ vdev_elevator_switch(vdev_t *v, char *elevator)
 	 * "Skip devices without schedulers" check below will fail.
 	 */
 	if (!v->vdev_wholedisk && strncmp(device, "dm-", 3) != 0)
-		return (0);
+		return;
 
 	/* Skip devices without schedulers (loop, ram, dm, etc) */
 	if (!q->elevator || !blk_queue_stackable(q))
-		return (0);
+		return;
 
 	/* Leave existing scheduler when set to "none" */
 	if ((strncmp(elevator, "none", 4) == 0) && (strlen(elevator) == 4))
-		return (0);
+		return;
 
 #ifdef HAVE_ELEVATOR_CHANGE
 	error = elevator_change(q, elevator);
@@ -156,20 +165,16 @@ vdev_elevator_switch(vdev_t *v, char *elevator)
 	    " 2>/dev/null; " \
 	    "echo %s"
 
-	{
-		char *argv[] = { "/bin/sh", "-c", NULL, NULL };
-		char *envp[] = { NULL };
+	char *argv[] = { "/bin/sh", "-c", NULL, NULL };
+	char *envp[] = { NULL };
 
-		argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
-		error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
-		strfree(argv[2]);
-	}
+	argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
+	error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+	strfree(argv[2]);
 #endif /* HAVE_ELEVATOR_CHANGE */
 	if (error)
 		printk("ZFS: Unable to set \"%s\" scheduler for %s (%s): %d\n",
 		    elevator, v->vdev_path, device, error);
-
-	return (error);
 }
 
 /*
@@ -798,6 +803,35 @@ vdev_disk_rele(vdev_t *vd)
 	/* XXX: Implement me as a vnode rele for the device */
 }
 
+static int
+param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
+{
+	spa_t *spa = NULL;
+	char *p;
+
+	if (val == NULL)
+		return (SET_ERROR(-EINVAL));
+
+	if ((p = strchr(val, '\n')) != NULL)
+		*p = '\0';
+
+	mutex_enter(&spa_namespace_lock);
+	while ((spa = spa_next(spa)) != NULL) {
+		if (spa_state(spa) != POOL_STATE_ACTIVE ||
+		    !spa_writeable(spa) || spa_suspended(spa))
+			continue;
+
+		spa_open_ref(spa, FTAG);
+		mutex_exit(&spa_namespace_lock);
+		vdev_elevator_switch(spa->spa_root_vdev, (char *)val);
+		mutex_enter(&spa_namespace_lock);
+		spa_close(spa, FTAG);
+	}
+	mutex_exit(&spa_namespace_lock);
+
+	return (param_set_charp(val, kp));
+}
+
 vdev_ops_t vdev_disk_ops = {
 	vdev_disk_open,
 	vdev_disk_close,
@@ -812,5 +846,6 @@ vdev_ops_t vdev_disk_ops = {
 	B_TRUE			/* leaf vdev */
 };
 
-module_param(zfs_vdev_scheduler, charp, 0644);
+module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
+    param_get_charp, &zfs_vdev_scheduler, 0644);
 MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
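
With the parameter wired through module_param_call(), its module-command-line
behavior is unchanged, but a runtime write such as

	echo noop > /sys/module/zfs/parameters/zfs_vdev_scheduler

no longer leaves echo's trailing newline in the stored string (the trim this
patch's subject refers to), and the change takes effect at once: the setter
walks every active, writable, non-suspended pool, and vdev_elevator_switch()
now recurses from the root vdev down to the leaf vdevs, rather than the
scheduler only being applied when a vdev is next opened, which is what the
commit message means by "immediately apply the new scheduler".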