/*
 * linux/fs/char_dev.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

static struct kobj_map *cdev_map;

#define MAX_PROBE_HASH 255	/* random */

static DECLARE_MUTEX(chrdevs_lock);

static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	const char *name;
	struct file_operations *fops;
	struct cdev *cdev;		/* will die */
} *chrdevs[MAX_PROBE_HASH];

/* index in the above */
static inline int major_to_index(int major)
{
	return major % MAX_PROBE_HASH;
}

/* get char device names in somewhat random order */
int get_chrdev_list(char *page)
{
	struct char_device_struct *cd;
	int i, len;

	len = sprintf(page, "Character devices:\n");

	down(&chrdevs_lock);
	for (i = 0; i < ARRAY_SIZE(chrdevs); i++) {
		for (cd = chrdevs[i]; cd; cd = cd->next) {
			/*
			 * if the current name, plus the 5 extra characters
			 * in the device line for this entry
			 * would run us off the page, we're done
			 */
			if ((len+strlen(cd->name) + 5) >= PAGE_SIZE)
				goto page_full;

			len += sprintf(page+len, "%3d %s\n",
				       cd->major, cd->name);
		}
	}
page_full:
	up(&chrdevs_lock);

	return len;
}


/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors and will return zero on success.
 *
 * Returns a -ve errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			 int minorct, const char *name)
{
	struct char_device_struct *cd, **cp;
	int ret = 0;
	int i;

	cd = kmalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	memset(cd, 0, sizeof(struct char_device_struct));

	down(&chrdevs_lock);

	/* temporary */
	if (major == 0) {
		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
			if (chrdevs[i] == NULL)
				break;
		}

		if (i == 0) {
			ret = -EBUSY;
			goto out;
		}
		major = i;
		ret = major;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	cd->name = name;

	i = major_to_index(major);

	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major > major ||
		    ((*cp)->major == major && (*cp)->baseminor >= baseminor))
			break;
	if (*cp && (*cp)->major == major &&
	    (*cp)->baseminor < baseminor + minorct) {
		ret = -EBUSY;
		goto out;
	}
	cd->next = *cp;
	*cp = cd;
	up(&chrdevs_lock);
	return cd;
out:
	up(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}

static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	down(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		cd = *cp;
		*cp = cd->next;
	}
	up(&chrdevs_lock);
	return cd;
}

int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
					      next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
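
/*
 * Example (a sketch, not part of the original file): a driver that owns a
 * fixed, known device number could reserve its range like this (the major
 * number, the count and the "foo" name are hypothetical):
 *
 *	dev_t devt = MKDEV(240, 0);
 *
 *	err = register_chrdev_region(devt, 8, "foo");
 *	if (err)
 *		return err;
 *	...
 *	unregister_chrdev_region(devt, 8);
 */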

int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
			const char *name)
{
	struct char_device_struct *cd;
	cd = __register_chrdev_region(0, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	*dev = MKDEV(cd->major, cd->baseminor);
	return 0;
}
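
/*
 * Example (a sketch, assuming a hypothetical "foo" driver with its own
 * foo_fops and an embedded foo_cdev): a dynamically allocated region is
 * normally paired with the cdev interface further down in this file:
 *
 *	dev_t devt;
 *
 *	err = alloc_chrdev_region(&devt, 0, 4, "foo");
 *	if (err)
 *		return err;
 *	cdev_init(&foo_cdev, &foo_fops);
 *	foo_cdev.owner = THIS_MODULE;
 *	err = cdev_add(&foo_cdev, devt, 4);
 *	if (err)
 *		unregister_chrdev_region(devt, 4);
 */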

int register_chrdev(unsigned int major, const char *name,
		    struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	char *s;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, 0, 256, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);
	for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/'))
		*s = '!';

	err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
	if (err)
		goto out;

	cd->cdev = cdev;

	return major ? 0 : cd->major;
out:
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, 0, 256));
	return err;
}
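
/*
 * Example (a sketch with a hypothetical "foo" driver): the older interface
 * above claims all 256 minors of one major in a single call; passing
 * major == 0 requests a dynamic major, which is returned on success:
 *
 *	int major = register_chrdev(0, "foo", &foo_fops);
 *	if (major < 0)
 *		return major;
 *	...
 *	unregister_chrdev(major, "foo");
 */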

void unregister_chrdev_region(dev_t from, unsigned count)
{
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
}

int unregister_chrdev(unsigned int major, const char *name)
{
	struct char_device_struct *cd;
	cd = __unregister_chrdev_region(major, 0, 256);
	if (cd && cd->cdev)
		cdev_del(cd->cdev);
	kfree(cd);
	return 0;
}

static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}

void cdev_put(struct cdev *p)
{
	if (p) {
		kobject_put(&p->kobj);
		module_put(p->owner);
	}
}

/*
 * Called every time a character special file is opened.  The cdev backing
 * this device number is looked up once and cached in inode->i_cdev; the
 * lookup (which may trigger a module load via the probe hook) is done with
 * cdev_lock dropped, so i_cdev is re-checked afterwards and the loser of
 * any race drops its extra reference via cdev_put().
 */
int chrdev_open(struct inode * inode, struct file * filp)
{
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			inode->i_cindex = idx;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);
	if (ret)
		return ret;
	filp->f_op = fops_get(p->ops);
	if (!filp->f_op) {
		cdev_put(p);
		return -ENXIO;
	}
	if (filp->f_op->open) {
		lock_kernel();
		ret = filp->f_op->open(inode,filp);
		unlock_kernel();
	}
	if (ret)
		cdev_put(p);
	return ret;
}

void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}

/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
struct file_operations def_chr_fops = {
	.open = chrdev_open,
};

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct cdev *p = data;
	return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
	struct cdev *p = data;
	return cdev_get(p) ? 0 : -1;
}

int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	p->dev = dev;
	p->count = count;
	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}
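
/*
 * Example (a sketch, with devt and foo_fops hypothetical): a standalone
 * cdev from cdev_alloc() is attached the same way; on failure the
 * reference taken by cdev_alloc() is dropped, as register_chrdev() does
 * above:
 *
 *	struct cdev *c = cdev_alloc();
 *	if (!c)
 *		return -ENOMEM;
 *	c->owner = THIS_MODULE;
 *	c->ops = &foo_fops;
 *	err = cdev_add(c, devt, 1);
 *	if (err)
 *		kobject_put(&c->kobj);
 */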

static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}

void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}

static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
	kfree(p);
}

static struct kobj_type ktype_cdev_default = {
	.release = cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
	.release = cdev_dynamic_release,
};

struct cdev *cdev_alloc(void)
{
	struct cdev *p = kmalloc(sizeof(struct cdev), GFP_KERNEL);
	if (p) {
		memset(p, 0, sizeof(struct cdev));
		p->kobj.ktype = &ktype_cdev_dynamic;
		INIT_LIST_HEAD(&p->list);
		kobject_init(&p->kobj);
	}
	return p;
}

void cdev_init(struct cdev *cdev, struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	cdev->kobj.ktype = &ktype_cdev_default;
	kobject_init(&cdev->kobj);
	cdev->ops = fops;
}
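
/*
 * Note on the two initializers: cdev_init() is for a struct cdev embedded
 * in a larger, caller-managed structure (ktype_cdev_default never frees
 * it), while cdev_alloc() returns a standalone cdev that is kfree()d on
 * the final kobject_put() via ktype_cdev_dynamic.  A common embedded
 * layout (struct foo_dev, fdev and foo_fops are hypothetical) looks like:
 *
 *	struct foo_dev {
 *		struct cdev cdev;
 *		...
 *	};
 *
 *	cdev_init(&fdev->cdev, &foo_fops);
 *	fdev->cdev.owner = THIS_MODULE;
 *	err = cdev_add(&fdev->cdev, devt, 1);
 */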

static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}

void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}

/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);