]>
Commit | Line | Data |
---|---|---|
34dc7c2f BB |
1 | /* |
2 | * CDDL HEADER START | |
3 | * | |
4 | * The contents of this file are subject to the terms of the | |
5 | * Common Development and Distribution License (the "License"). | |
6 | * You may not use this file except in compliance with the License. | |
7 | * | |
8 | * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE | |
9 | * or http://www.opensolaris.org/os/licensing. | |
10 | * See the License for the specific language governing permissions | |
11 | * and limitations under the License. | |
12 | * | |
13 | * When distributing Covered Code, include this CDDL HEADER in each | |
14 | * file and include the License file at usr/src/OPENSOLARIS.LICENSE. | |
15 | * If applicable, add the following below this CDDL HEADER, with the | |
16 | * fields enclosed by brackets "[]" replaced with your own identifying | |
17 | * information: Portions Copyright [yyyy] [name of copyright owner] | |
18 | * | |
19 | * CDDL HEADER END | |
20 | */ | |
21 | /* | |
428870ff | 22 | * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. |
a08ee875 | 23 | * Copyright (c) 2012 by Delphix. All rights reserved. |
34dc7c2f BB |
24 | */ |
25 | ||
34dc7c2f BB |
26 | /* |
27 | * This file is intended for functions that ought to be common between user | |
28 | * land (libzfs) and the kernel. When many common routines need to be shared | |
29 | * then a separate file should to be created. | |
30 | */ | |
31 | ||
32 | #if defined(_KERNEL) | |
33 | #include <sys/systm.h> | |
428870ff BB |
34 | #else |
35 | #include <string.h> | |
34dc7c2f BB |
36 | #endif |
37 | ||
38 | #include <sys/types.h> | |
39 | #include <sys/fs/zfs.h> | |
428870ff | 40 | #include <sys/int_limits.h> |
34dc7c2f | 41 | #include <sys/nvpair.h> |
428870ff | 42 | #include "zfs_comutil.h" |
cae5b340 | 43 | #include <sys/zfs_ratelimit.h> |
34dc7c2f BB |
44 | |
45 | /* | |
46 | * Are there allocatable vdevs? | |
47 | */ | |
48 | boolean_t | |
49 | zfs_allocatable_devs(nvlist_t *nv) | |
50 | { | |
51 | uint64_t is_log; | |
52 | uint_t c; | |
53 | nvlist_t **child; | |
54 | uint_t children; | |
55 | ||
56 | if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, | |
57 | &child, &children) != 0) { | |
58 | return (B_FALSE); | |
59 | } | |
60 | for (c = 0; c < children; c++) { | |
61 | is_log = 0; | |
62 | (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, | |
63 | &is_log); | |
64 | if (!is_log) | |
65 | return (B_TRUE); | |
66 | } | |
67 | return (B_FALSE); | |
68 | } | |
428870ff BB |
69 | |
70 | void | |
71 | zpool_get_rewind_policy(nvlist_t *nvl, zpool_rewind_policy_t *zrpp) | |
72 | { | |
73 | nvlist_t *policy; | |
74 | nvpair_t *elem; | |
75 | char *nm; | |
76 | ||
77 | /* Defaults */ | |
78 | zrpp->zrp_request = ZPOOL_NO_REWIND; | |
79 | zrpp->zrp_maxmeta = 0; | |
80 | zrpp->zrp_maxdata = UINT64_MAX; | |
81 | zrpp->zrp_txg = UINT64_MAX; | |
82 | ||
83 | if (nvl == NULL) | |
84 | return; | |
85 | ||
86 | elem = NULL; | |
87 | while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) { | |
88 | nm = nvpair_name(elem); | |
89 | if (strcmp(nm, ZPOOL_REWIND_POLICY) == 0) { | |
90 | if (nvpair_value_nvlist(elem, &policy) == 0) | |
91 | zpool_get_rewind_policy(policy, zrpp); | |
92 | return; | |
93 | } else if (strcmp(nm, ZPOOL_REWIND_REQUEST) == 0) { | |
94 | if (nvpair_value_uint32(elem, &zrpp->zrp_request) == 0) | |
95 | if (zrpp->zrp_request & ~ZPOOL_REWIND_POLICIES) | |
96 | zrpp->zrp_request = ZPOOL_NO_REWIND; | |
97 | } else if (strcmp(nm, ZPOOL_REWIND_REQUEST_TXG) == 0) { | |
98 | (void) nvpair_value_uint64(elem, &zrpp->zrp_txg); | |
99 | } else if (strcmp(nm, ZPOOL_REWIND_META_THRESH) == 0) { | |
100 | (void) nvpair_value_uint64(elem, &zrpp->zrp_maxmeta); | |
101 | } else if (strcmp(nm, ZPOOL_REWIND_DATA_THRESH) == 0) { | |
102 | (void) nvpair_value_uint64(elem, &zrpp->zrp_maxdata); | |
103 | } | |
104 | } | |
105 | if (zrpp->zrp_request == 0) | |
106 | zrpp->zrp_request = ZPOOL_NO_REWIND; | |
107 | } | |
108 | ||
/* One row mapping a ZPL (filesystem) version to the SPA (pool) version it requires. */
typedef struct zfs_version_spa_map {
	int version_zpl;
	int version_spa;
} zfs_version_spa_map_t;

/*
 * Keep this table in monotonically increasing version number order.
 * The {0, 0} row is the sentinel that terminates iteration in
 * zfs_zpl_version_map() and zfs_spa_version_map().
 */
static zfs_version_spa_map_t zfs_version_table[] = {
	{ZPL_VERSION_INITIAL, SPA_VERSION_INITIAL},
	{ZPL_VERSION_DIRENT_TYPE, SPA_VERSION_INITIAL},
	{ZPL_VERSION_FUID, SPA_VERSION_FUID},
	{ZPL_VERSION_USERSPACE, SPA_VERSION_USERSPACE},
	{ZPL_VERSION_SA, SPA_VERSION_SA},
	{0, 0}
};
125 | ||
126 | /* | |
127 | * Return the max zpl version for a corresponding spa version | |
128 | * -1 is returned if no mapping exists. | |
129 | */ | |
130 | int | |
131 | zfs_zpl_version_map(int spa_version) | |
132 | { | |
133 | int i; | |
134 | int version = -1; | |
135 | ||
136 | for (i = 0; zfs_version_table[i].version_spa; i++) { | |
137 | if (spa_version >= zfs_version_table[i].version_spa) | |
138 | version = zfs_version_table[i].version_zpl; | |
139 | } | |
140 | ||
141 | return (version); | |
142 | } | |
143 | ||
144 | /* | |
145 | * Return the min spa version for a corresponding spa version | |
146 | * -1 is returned if no mapping exists. | |
147 | */ | |
148 | int | |
149 | zfs_spa_version_map(int zpl_version) | |
150 | { | |
151 | int i; | |
152 | int version = -1; | |
153 | ||
154 | for (i = 0; zfs_version_table[i].version_zpl; i++) { | |
155 | if (zfs_version_table[i].version_zpl >= zpl_version) | |
156 | return (zfs_version_table[i].version_spa); | |
157 | } | |
158 | ||
159 | return (version); | |
160 | } | |
161 | ||
/*
 * This is the table of legacy internal event names; it should not be modified.
 * The internal events are now stored in the history log as strings.
 *
 * Indexed by the legacy numeric event id; entry 0 is the invalid-event
 * placeholder.  The array length is fixed at ZFS_NUM_LEGACY_HISTORY_EVENTS.
 */
const char *zfs_history_event_names[ZFS_NUM_LEGACY_HISTORY_EVENTS] = {
	"invalid event",
	"pool create",
	"vdev add",
	"pool remove",
	"pool destroy",
	"pool export",
	"pool import",
	"vdev attach",
	"vdev replace",
	"vdev detach",
	"vdev online",
	"vdev offline",
	"vdev upgrade",
	"pool clear",
	"pool scrub",
	"pool property set",
	"create",
	"clone",
	"destroy",
	"destroy_begin_sync",
	"inherit",
	"property set",
	"quota set",
	"permission update",
	"permission remove",
	"permission who remove",
	"promote",
	"receive",
	"rename",
	"reservation set",
	"replay_inc_sync",
	"replay_full_sync",
	"rollback",
	"snapshot",
	"filesystem version upgrade",
	"refquota set",
	"refreservation set",
	"pool scrub done",
	"user hold",
	"user release",
	"pool split",
};
c28b2279 | 209 | |
cae5b340 AX |
210 | /* |
211 | * Initialize rate limit struct | |
212 | * | |
213 | * rl: zfs_ratelimit_t struct | |
214 | * burst: Number to allow in an interval before rate limiting | |
215 | * interval: Interval time in seconds | |
216 | */ | |
217 | void | |
42f7b73b | 218 | zfs_ratelimit_init(zfs_ratelimit_t *rl, unsigned int *burst, |
cae5b340 AX |
219 | unsigned int interval) |
220 | { | |
221 | rl->count = 0; | |
222 | rl->start = 0; | |
223 | rl->interval = interval; | |
224 | rl->burst = burst; | |
225 | mutex_init(&rl->lock, NULL, MUTEX_DEFAULT, NULL); | |
226 | } | |
227 | ||
/*
 * Finalize rate limit struct
 *
 * rl: zfs_ratelimit_t struct
 *
 * Destroys the mutex created by zfs_ratelimit_init(); the struct must
 * not be used afterwards without re-initializing it.
 */
void
zfs_ratelimit_fini(zfs_ratelimit_t *rl)
{
	mutex_destroy(&rl->lock);
}
238 | ||
239 | /* | |
240 | * Re-implementation of the kernel's __ratelimit() function | |
241 | * | |
242 | * We had to write our own rate limiter because the kernel's __ratelimit() | |
243 | * function annoyingly prints out how many times it rate limited to the kernel | |
244 | * logs (and there's no way to turn it off): | |
245 | * | |
246 | * __ratelimit: 59 callbacks suppressed | |
247 | * | |
248 | * If the kernel ever allows us to disable these prints, we should go back to | |
249 | * using __ratelimit() instead. | |
250 | * | |
251 | * Return values are the same as __ratelimit(): | |
252 | * | |
253 | * 0: If we're rate limiting | |
254 | * 1: If we're not rate limiting. | |
255 | */ | |
256 | int | |
257 | zfs_ratelimit(zfs_ratelimit_t *rl) | |
258 | { | |
259 | hrtime_t now; | |
260 | hrtime_t elapsed; | |
261 | int rc = 1; | |
262 | ||
263 | mutex_enter(&rl->lock); | |
264 | ||
265 | now = gethrtime(); | |
266 | elapsed = now - rl->start; | |
267 | ||
268 | rl->count++; | |
269 | if (NSEC2SEC(elapsed) >= rl->interval) { | |
270 | rl->start = now; | |
271 | rl->count = 0; | |
272 | } else { | |
42f7b73b | 273 | if (rl->count >= *rl->burst) { |
cae5b340 AX |
274 | rc = 0; /* We're ratelimiting */ |
275 | } | |
276 | } | |
277 | mutex_exit(&rl->lock); | |
278 | ||
279 | return (rc); | |
280 | } | |
281 | ||
/* Export the shared routines to other kernel modules when built with SPL. */
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfs_allocatable_devs);
EXPORT_SYMBOL(zpool_get_rewind_policy);
EXPORT_SYMBOL(zfs_zpl_version_map);
EXPORT_SYMBOL(zfs_spa_version_map);
EXPORT_SYMBOL(zfs_history_event_names);
EXPORT_SYMBOL(zfs_ratelimit_init);
EXPORT_SYMBOL(zfs_ratelimit_fini);
EXPORT_SYMBOL(zfs_ratelimit);
#endif