/*
   md_k.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_K_H
#define _MD_K_H

/* and dm-bio-list.h is not under include/linux because.... ??? */
#include "../../../drivers/md/dm-bio-list.h"

#define MD_RESERVED       0UL
#define LINEAR            1UL
#define RAID0             2UL
#define RAID1             3UL
#define RAID5             4UL
#define TRANSLUCENT       5UL
#define HSM               6UL
#define MULTIPATH         7UL
#define RAID6             8UL
#define RAID10            9UL
#define FAULTY           10UL
#define MAX_PERSONALITY  11UL

#define LEVEL_MULTIPATH  (-4)
#define LEVEL_LINEAR     (-1)
#define LEVEL_FAULTY     (-5)

#define MaxSector (~(sector_t)0)
#define MD_THREAD_NAME_MAX 14

static inline int pers_to_level (int pers)
{
	switch (pers) {
	case FAULTY:		return LEVEL_FAULTY;
	case MULTIPATH:		return LEVEL_MULTIPATH;
	case HSM:		return -3;
	case TRANSLUCENT:	return -2;
	case LINEAR:		return LEVEL_LINEAR;
	case RAID0:		return 0;
	case RAID1:		return 1;
	case RAID5:		return 5;
	case RAID6:		return 6;
	case RAID10:		return 10;
	}
	BUG();
	return MD_RESERVED;
}

static inline int level_to_pers (int level)
{
	switch (level) {
	case LEVEL_FAULTY:	return FAULTY;
	case LEVEL_MULTIPATH:	return MULTIPATH;
	case -3:		return HSM;
	case -2:		return TRANSLUCENT;
	case LEVEL_LINEAR:	return LINEAR;
	case 0:			return RAID0;
	case 1:			return RAID1;
	case 4:
	case 5:			return RAID5;
	case 6:			return RAID6;
	case 10:		return RAID10;
	}
	return MD_RESERVED;
}
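
/*
 * Illustrative example (not part of the original header): the two
 * helpers above are near-inverses of each other.  The only asymmetry
 * is RAID4, which has no personality of its own and is handled by the
 * RAID5 personality, so level 4 does not round-trip exactly.
 */
#if 0	/* example only */
static inline void pers_level_example(void)
{
	BUG_ON(pers_to_level(RAID5) != 5);
	BUG_ON(level_to_pers(5) != RAID5);
	BUG_ON(level_to_pers(4) != RAID5);		/* RAID4 -> RAID5 personality */
	BUG_ON(pers_to_level(level_to_pers(4)) != 5);	/* ...so 4 comes back as 5 */
}
#endif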

typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;

#define MAX_MD_DEVS  256	/* Max number of md dev */

/*
 * options passed in raidrun:
 */

#define MAX_CHUNK_SIZE (4096*1024)

/*
 * default readahead
 */
static inline int disk_faulty(mdp_disk_t * d)
{
	return d->state & (1 << MD_DISK_FAULTY);
}

static inline int disk_active(mdp_disk_t * d)
{
	return d->state & (1 << MD_DISK_ACTIVE);
}

static inline int disk_sync(mdp_disk_t * d)
{
	return d->state & (1 << MD_DISK_SYNC);
}

static inline int disk_spare(mdp_disk_t * d)
{
	return !disk_sync(d) && !disk_active(d) && !disk_faulty(d);
}

static inline int disk_removed(mdp_disk_t * d)
{
	return d->state & (1 << MD_DISK_REMOVED);
}

static inline void mark_disk_faulty(mdp_disk_t * d)
{
	d->state |= (1 << MD_DISK_FAULTY);
}

static inline void mark_disk_active(mdp_disk_t * d)
{
	d->state |= (1 << MD_DISK_ACTIVE);
}

static inline void mark_disk_sync(mdp_disk_t * d)
{
	d->state |= (1 << MD_DISK_SYNC);
}

static inline void mark_disk_spare(mdp_disk_t * d)
{
	d->state = 0;
}

static inline void mark_disk_removed(mdp_disk_t * d)
{
	d->state = (1 << MD_DISK_FAULTY) | (1 << MD_DISK_REMOVED);
}

static inline void mark_disk_inactive(mdp_disk_t * d)
{
	d->state &= ~(1 << MD_DISK_ACTIVE);
}

static inline void mark_disk_nonsync(mdp_disk_t * d)
{
	d->state &= ~(1 << MD_DISK_SYNC);
}
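
/*
 * Illustrative example (not part of the original header): a spare is a
 * disk with none of the state bits set (see disk_spare() above), so
 * promoting a spare to a full member is simply a matter of setting the
 * ACTIVE and SYNC bits.
 */
#if 0	/* example only */
static inline void promote_spare_example(mdp_disk_t *d)
{
	if (disk_spare(d)) {
		mark_disk_active(d);
		mark_disk_sync(d);
	}
}
#endif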

/*
 * MD's 'extended' device
 */
struct mdk_rdev_s
{
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t size;			/* Device size (in blocks) */
	mddev_t *mddev;			/* RAID array if running */
	unsigned long last_events;	/* IO event timestamp */

	struct block_device *bdev;	/* block device handle */

	struct page *sb_page;
	int sb_loaded;
	sector_t data_offset;		/* start of data in array */
	sector_t sb_offset;
	int preferred_minor;		/* autorun support */

	/* A device can be in one of three states based on two flags:
	 * Not working:    faulty==1 in_sync==0
	 * Fully working:  faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array
	 *                 faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */
	int faulty;			/* if faulty do not issue IO requests */
	int in_sync;			/* device is a full member of the array */

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */

	atomic_t nr_pending;		/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
};
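
/*
 * Illustrative example (not part of the original header): because
 * faulty==1/in_sync==1 can never happen, ->faulty alone identifies a
 * dead device and ->in_sync alone identifies a fully working one; only
 * the intermediate "working but needs resync" state needs both flags.
 */
#if 0	/* example only */
static inline int rdev_out_of_sync_example(mdk_rdev_t *rdev)
{
	/* working, but not yet in sync with the array */
	return !rdev->faulty && !rdev->in_sync;
}
#endif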

typedef struct mdk_personality_s mdk_personality_t;

struct mddev_s
{
	void				*private;
	mdk_personality_t		*pers;
	dev_t				unit;
	int				md_minor;
	struct list_head		disks;
	int				sb_dirty;
	int				ro;

	struct gendisk			*gendisk;

	/* Superblock information */
	int				major_version,
					minor_version,
					patch_version;
	int				persistent;
	int				chunk_size;
	time_t				ctime, utime;
	int				level, layout;
	int				raid_disks;
	int				max_disks;
	sector_t			size;		/* used size of component devices */
	sector_t			array_size;	/* exported array size */
	__u64				events;

	char				uuid[16];

	struct mdk_thread_s		*thread;	/* management thread */
	struct mdk_thread_s		*sync_thread;	/* doing resync or reconstruct */
	sector_t			curr_resync;	/* blocks scheduled */
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */

	sector_t			resync_max_sectors; /* may be set by personality */
	/* recovery/resync flags
	 * NEEDED:  we might need to start a resync/recover
	 * RUNNING: a thread is running, or about to be started
	 * SYNC:    actually doing a resync, not a recovery
	 * ERR:     an IO error was detected - abort the resync/recovery
	 * INTR:    someone requested a (clean) early abort.
	 * DONE:    thread is done and is waiting to be reaped
	 * (an illustrative use of these bits follows this structure)
	 */
#define	MD_RECOVERY_RUNNING	0
#define	MD_RECOVERY_SYNC	1
#define	MD_RECOVERY_ERR		2
#define	MD_RECOVERY_INTR	3
#define	MD_RECOVERY_DONE	4
#define	MD_RECOVERY_NEEDED	5
	unsigned long			recovery;

	int				in_sync;	/* known to not need resync */
	struct semaphore		reconfig_sem;
	atomic_t			active;

	int				changed;	/* true if we might need to reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */

	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;

	spinlock_t			write_lock;
	struct bio_list			write_list;

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	atomic_t			writes_pending;
	request_queue_t			*queue;		/* for plugging ... */

	struct bitmap			*bitmap;	/* the bitmap for the device */
	struct file			*bitmap_file;	/* the bitmap file */

	struct list_head		all_mddevs;
};
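
/*
 * Illustrative example (not part of the original header, referenced
 * from the recovery-flag comment above): the MD_RECOVERY_* constants
 * are bit numbers within ->recovery and are manipulated with the
 * standard atomic bitops.
 */
#if 0	/* example only */
static inline void request_recovery_example(mddev_t *mddev)
{
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return;	/* a resync/recovery thread is already active */
	/* otherwise the management thread will notice NEEDED and act */
}
#endif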


static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
{
	int faulty = rdev->faulty;
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
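
/*
 * Illustrative example (not part of the original header): ->nr_pending
 * is raised before I/O is sent to a member device and dropped with
 * rdev_dec_pending() on completion, so a faulty device whose last
 * outstanding request finishes triggers a recovery check.
 */
#if 0	/* example only */
static inline void submit_to_rdev_example(mdk_rdev_t *rdev, struct bio *bio)
{
	atomic_inc(&rdev->nr_pending);
	generic_make_request(bio);
	/* ...and in the bio completion handler:
	 *	rdev_dec_pending(rdev, rdev->mddev);
	 */
}
#endif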

static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}

struct mdk_personality_s
{
	char *name;
	struct module *owner;
	int (*make_request)(request_queue_t *q, struct bio *bio);
	int (*run)(mddev_t *mddev);
	int (*stop)(mddev_t *mddev);
	void (*status)(struct seq_file *seq, mddev_t *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_remove_disk) (mddev_t *mddev, int number);
	int (*spare_active) (mddev_t *mddev);
	sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
	int (*resize) (mddev_t *mddev, sector_t sectors);
	int (*reshape) (mddev_t *mddev, int raid_disks);
	int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
};
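
/*
 * Illustrative sketch (not part of the original header): a personality
 * module fills in one of these operation tables and registers it with
 * the md core.  The example_* handlers below are hypothetical; only
 * the field names are taken from the structure above.
 */
#if 0	/* example only */
static mdk_personality_t example_personality = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.make_request	= example_make_request,
	.run		= example_run,
	.stop		= example_stop,
	.status		= example_status,
	.error_handler	= example_error_handler,
};
#endif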


static inline char * mdname (mddev_t * mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr);

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define ITERATE_RDEV_GENERIC(head,rdev,tmp)				\
									\
	for ((tmp) = (head).next;					\
		(rdev) = (list_entry((tmp), mdk_rdev_t, same_set)),	\
		(tmp) = (tmp)->next, (tmp)->prev != &(head)		\
		; )
/*
 * iterates through the 'same array disks' ringlist
 */
#define ITERATE_RDEV(mddev,rdev,tmp)					\
	ITERATE_RDEV_GENERIC((mddev)->disks,rdev,tmp)

/*
 * Iterates through 'pending RAID disks'
 */
#define ITERATE_RDEV_PENDING(rdev,tmp)					\
	ITERATE_RDEV_GENERIC(pending_raid_disks,rdev,tmp)
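
/*
 * Illustrative example (not part of the original header): the usual
 * way to walk every member device of an array.  'tmp' is only the
 * iterator's cursor and must not be used by the loop body.
 */
#if 0	/* example only */
static inline int count_faulty_example(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;
	int cnt = 0;

	ITERATE_RDEV(mddev, rdev, tmp)
		if (rdev->faulty)
			cnt++;
	return cnt;
}
#endif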

typedef struct mdk_thread_s {
	void			(*run) (mddev_t *mddev);
	mddev_t			*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct completion	*event;
	struct task_struct	*tsk;
	unsigned long		timeout;
	const char		*name;
} mdk_thread_t;

#define THREAD_WAKEUP  0

#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
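
/*
 * Illustrative example (not part of the original header): callers take
 * the spinlock, then use wait_event_lock_irq() to sleep until a
 * lock-protected condition holds; the lock is dropped around the sleep
 * and re-taken before the condition is re-tested.  The pairing of
 * recovery_wait with write_lock below is only for illustration.
 */
#if 0	/* example only */
static inline void drain_recovery_example(mddev_t *mddev)
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->recovery_wait,
			    atomic_read(&mddev->recovery_active) == 0,
			    mddev->write_lock,
			    (void)0 /* no unplug action in this sketch */);
	spin_unlock_irq(&mddev->write_lock);
}
#endif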

#endif