/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>

#include "rfkill.h"

#define POLL_INTERVAL		(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)

struct rfkill {
	spinlock_t		lock;

	const char		*name;
	enum rfkill_type	type;

	unsigned long		state;

	u32			idx;

	bool			registered;
	bool			suspended;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)

struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");


/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

static struct {
	bool cur, def;
} rfkill_global_states[NUM_RFKILL_TYPES];

static unsigned long rfkill_states_default_locked;

static bool rfkill_epo_lock_active;


#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered || rfkill->suspended)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

static bool __rfkill_set_hw_state(struct rfkill *rfkill,
				  bool blocked, bool *change)
{
	unsigned long flags;
	bool prev, any;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	*change = prev != blocked;
	any = rfkill->state & RFKILL_BLOCK_ANY;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	return any;
}

/**
 * rfkill_set_block - wrapper for the set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	int err;

	/*
	 * Some platforms (...!) generate input events which affect the
	 * _hard_ kill state -- whenever something tries to change the
	 * current software state query the hardware state too.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (rfkill->state & RFKILL_BLOCK_SW)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _PREV, which may be different
		 * from what we set _PREV to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_event(rfkill);
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is claimed by userspace (in which case,
 * that switch is left alone) or suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_global_states[type].cur = blocked;
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].def = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global states from the saved
 * defaults. This can undo the effects of a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].def);
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true if the EPO lock is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif

void rfkill_set_global_sw_state(const enum rfkill_type type, bool blocked)
{
	BUG_ON(type == RFKILL_TYPE_ALL);

	mutex_lock(&rfkill_global_mutex);

	/* don't allow unblock when epo */
	if (rfkill_epo_lock_active && !blocked)
		goto out;

	/* too late */
	if (rfkill_states_default_locked & BIT(type))
		goto out;

	rfkill_states_default_locked |= BIT(type);

	rfkill_global_states[type].cur = blocked;
	rfkill_global_states[type].def = blocked;
out:
	mutex_unlock(&rfkill_global_mutex);
}
EXPORT_SYMBOL(rfkill_set_global_sw_state);


bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	bool ret, change;

	ret = __rfkill_set_hw_state(rfkill, blocked, &change);

	if (!rfkill->registered)
		return ret;

	if (change)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if in an ops->set_block call right now, use the other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return;

	if (swprev != sw || hwprev != hw)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);
}
EXPORT_SYMBOL(rfkill_set_states);

static ssize_t rfkill_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}

static const char *rfkill_get_type_str(enum rfkill_type type)
{
	switch (type) {
	case RFKILL_TYPE_WLAN:
		return "wlan";
	case RFKILL_TYPE_BLUETOOTH:
		return "bluetooth";
	case RFKILL_TYPE_UWB:
		return "ultrawideband";
	case RFKILL_TYPE_WIMAX:
		return "wimax";
	case RFKILL_TYPE_WWAN:
		return "wwan";
	default:
		BUG();
	}

	BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1);
}

static ssize_t rfkill_type_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}

static ssize_t rfkill_idx_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->idx);
}

static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t rfkill_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return sprintf(buf, "%d\n", user_state_from_blocked(state));
}

static ssize_t rfkill_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	/*
	 * The intention was that userspace can only take control over
	 * a given device when/if rfkill-input doesn't control it due
	 * to user_claim. Since user_claim is currently unsupported,
	 * we never support changing the state from userspace -- this
	 * can be implemented again later.
	 */

	return -EPERM;
}

static ssize_t rfkill_claim_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t rfkill_claim_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	return -EOPNOTSUPP;
}

static struct device_attribute rfkill_dev_attrs[] = {
	__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
	__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
	__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
	__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
	__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
	__ATTR_NULL
};

static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_get_type_str(rfkill->type));
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	return error;
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);

static int rfkill_suspend(struct device *dev, pm_message_t state)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill_pause_polling(rfkill);

	rfkill->suspended = true;

	return 0;
}

static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);

	rfkill->suspended = false;

	schedule_work(&rfkill->uevent_work);

	rfkill_resume_polling(rfkill);

	return 0;
}

static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_attrs	= rfkill_dev_attrs,
	.dev_uevent	= rfkill_dev_uevent,
	.suspend	= rfkill_suspend,
	.resume		= rfkill_resume,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);


struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	rfkill->name = name;
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);

static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	schedule_delayed_work(&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}

static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev = &rfkill->dev;
	int error;

	BUG_ON(!rfkill);

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	if (!(rfkill_states_default_locked & BIT(rfkill->type))) {
		/* first of its kind */
		BUILD_BUG_ON(NUM_RFKILL_TYPES >
			sizeof(rfkill_states_default_locked) * 8);
		rfkill_states_default_locked |= BIT(rfkill->type);
		rfkill_global_states[rfkill->type].cur =
			rfkill_global_states[rfkill->type].def;
	}

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		schedule_delayed_work(&rfkill->poll_work,
			round_jiffies_relative(POLL_INTERVAL));
	schedule_work(&rfkill->sync_work);

	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

devdel:
	device_del(&rfkill->dev);
remove:
	list_del_init(&rfkill->node);
unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
	if (rfkill)
		put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	mutex_lock(&rfkill_global_mutex);
	mutex_lock(&data->mtx);
	/*
	 * start getting events from elsewhere but hold mtx to get
	 * startup events added first
	 */
	list_add(&data->list, &rfkill_fds);

	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		list_add_tail(&ev->list, &data->events);
	}
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return nonseekable_open(inode, file);

free:
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}

static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
{
	struct rfkill_data *data = file->private_data;
	unsigned int res = POLLOUT | POLLWRNORM;

	poll_wait(file, &data->read_wait, wait);

	mutex_lock(&data->mtx);
	if (!list_empty(&data->events))
		res = POLLIN | POLLRDNORM;
	mutex_unlock(&data->mtx);

	return res;
}

static bool rfkill_readable(struct rfkill_data *data)
{
	bool r;

	mutex_lock(&data->mtx);
	r = !list_empty(&data->events);
	mutex_unlock(&data->mtx);

	return r;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		ret = wait_event_interruptible(data->read_wait,
					       rfkill_readable(data));
		mutex_lock(&data->mtx);

		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
			      list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	list_del(&ev->list);
	kfree(ev);
out:
	mutex_unlock(&data->mtx);
	return ret;
}

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill *rfkill;
	struct rfkill_event ev;

	/* we don't need the 'hard' variable but accept it */
	if (count < sizeof(ev) - 1)
		return -EINVAL;

	if (copy_from_user(&ev, buf, sizeof(ev) - 1))
		return -EFAULT;

	if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
		return -EINVAL;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	if (ev.op == RFKILL_OP_CHANGE_ALL) {
		if (ev.type == RFKILL_TYPE_ALL) {
			enum rfkill_type i;
			for (i = 0; i < NUM_RFKILL_TYPES; i++)
				rfkill_global_states[i].cur = ev.soft;
		} else {
			rfkill_global_states[ev.type].cur = ev.soft;
		}
	}

	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
			continue;

		if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, ev.soft);
	}
	mutex_unlock(&rfkill_global_mutex);

	return count;
}

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	if (data->input_handler)
		atomic_dec(&rfkill_input_disabled);
#endif

	kfree(data);

	return 0;
}

#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
		return -ENOSYS;

	mutex_lock(&data->mtx);

	if (!data->input_handler) {
		atomic_inc(&rfkill_input_disabled);
		data->input_handler = true;
	}

	mutex_unlock(&data->mtx);

	return 0;
}
#endif

static const struct file_operations rfkill_fops = {
	.open		= rfkill_fop_open,
	.read		= rfkill_fop_read,
	.write		= rfkill_fop_write,
	.poll		= rfkill_fop_poll,
	.release	= rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
	.unlocked_ioctl	= rfkill_fop_ioctl,
	.compat_ioctl	= rfkill_fop_ioctl,
#endif
};

static struct miscdevice rfkill_miscdev = {
	.name	= "rfkill",
	.fops	= &rfkill_fops,
	.minor	= MISC_DYNAMIC_MINOR,
};

static int __init rfkill_init(void)
{
	int error;
	int i;

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].def = !rfkill_default_state;

	error = class_register(&rfkill_class);
	if (error)
		goto out;

	error = misc_register(&rfkill_miscdev);
	if (error) {
		class_unregister(&rfkill_class);
		goto out;
	}

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error) {
		misc_deregister(&rfkill_miscdev);
		class_unregister(&rfkill_class);
		goto out;
	}
#endif

out:
	return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);