]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - net/rfkill/core.c
rfkill: rewrite
[mirror_ubuntu-bionic-kernel.git] / net / rfkill / core.c
CommitLineData
19d337df
JB
1/*
2 * Copyright (C) 2006 - 2007 Ivo van Doorn
3 * Copyright (C) 2007 Dmitry Torokhov
4 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the
18 * Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/workqueue.h>
26#include <linux/capability.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/rfkill.h>
30#include <linux/spinlock.h>
31
32#include "rfkill.h"
33
#define POLL_INTERVAL	(5 * HZ)

/*
 * Bits for rfkill->state (protected by rfkill->lock):
 *  _HW:         blocked by a hardware switch the driver reports
 *  _SW:         blocked in software (user/driver request)
 *  _SW_PREV:    saved software state, used to roll back / defer updates
 *               while ops->set_block() is running
 *  _SW_SETCALL: set while ops->set_block() is in progress, so concurrent
 *               rfkill_set_sw_state() calls update _SW_PREV instead
 */
#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)
43
/*
 * One registered rf switch.  Embedded struct device provides the
 * refcount; the memory is freed in rfkill_release() via rfkill_destroy().
 */
struct rfkill {
	spinlock_t		lock;		/* protects @state */

	const char		*name;
	enum rfkill_type	type;

	unsigned long		state;		/* RFKILL_BLOCK_* bits */

	bool			registered;
	bool			suspended;	/* suppresses uevents */

	const struct rfkill_ops	*ops;
	void			*data;		/* opaque cookie passed to @ops */

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;	/* override for trigger name */
#endif

	struct device		dev;
	struct list_head	node;		/* entry in rfkill_list */

	struct delayed_work	poll_work;	/* periodic ops->poll */
	struct work_struct	uevent_work;	/* deferred KOBJ_CHANGE */
	struct work_struct	sync_work;	/* sync to global state */
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)
71
72
73
74MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
75MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
76MODULE_DESCRIPTION("RF switch support");
77MODULE_LICENSE("GPL");
78
79
/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

/* per-type global block state: current, and default to restore after EPO */
static struct {
	bool cur, def;
} rfkill_global_states[NUM_RFKILL_TYPES];

/* one bit per type, set once that type's default can no longer change */
static unsigned long rfkill_states_default_locked;

/* true while emergency power off keeps all state changes locked out */
static bool rfkill_epo_lock_active;
106
107
#ifdef CONFIG_RFKILL_LEDS
/* Drive the per-switch LED trigger: LED off while blocked in any way. */
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

/* Called by the LED core when an LED attaches to our trigger. */
static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	/* push current state to the newly attached LED */
	rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

/* Must be called before rfkill_register() for the name to take effect. */
void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	/* fall back to the device name if no explicit trigger name set */
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}
#else
/* !CONFIG_RFKILL_LEDS: no-op stubs */
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */
173
174static void rfkill_uevent(struct rfkill *rfkill)
175{
176 if (!rfkill->registered || rfkill->suspended)
177 return;
178
179 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
180}
181
/*
 * Update the hardware-block bit under rfkill->lock.
 * Sets *@change to whether the HW bit actually flipped, and returns
 * whether the device is now blocked for ANY reason (hw or sw).
 */
static bool __rfkill_set_hw_state(struct rfkill *rfkill,
				  bool blocked, bool *change)
{
	unsigned long flags;
	bool prev, any;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	*change = prev != blocked;
	any = rfkill->state & RFKILL_BLOCK_ANY;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	/* update LED outside the lock */
	rfkill_led_trigger_event(rfkill);

	return any;
}
204
205/**
206 * rfkill_set_block - wrapper for set_block method
207 *
208 * @rfkill: the rfkill struct to use
209 * @blocked: the new software state
210 *
211 * Calls the set_block method (when applicable) and handles notifications
212 * etc. as well.
213 */
214static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
215{
216 unsigned long flags;
217 int err;
218
219 /*
220 * Some platforms (...!) generate input events which affect the
221 * _hard_ kill state -- whenever something tries to change the
222 * current software state query the hardware state too.
223 */
224 if (rfkill->ops->query)
225 rfkill->ops->query(rfkill, rfkill->data);
226
227 spin_lock_irqsave(&rfkill->lock, flags);
228 if (rfkill->state & RFKILL_BLOCK_SW)
229 rfkill->state |= RFKILL_BLOCK_SW_PREV;
230 else
231 rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
232
233 if (blocked)
234 rfkill->state |= RFKILL_BLOCK_SW;
235 else
236 rfkill->state &= ~RFKILL_BLOCK_SW;
237
238 rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
239 spin_unlock_irqrestore(&rfkill->lock, flags);
240
241 if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
242 return;
243
244 err = rfkill->ops->set_block(rfkill->data, blocked);
245
246 spin_lock_irqsave(&rfkill->lock, flags);
247 if (err) {
248 /*
249 * Failed -- reset status to _prev, this may be different
250 * from what set set _PREV to earlier in this function
251 * if rfkill_set_sw_state was invoked.
252 */
253 if (rfkill->state & RFKILL_BLOCK_SW_PREV)
254 rfkill->state |= RFKILL_BLOCK_SW;
255 else
256 rfkill->state &= ~RFKILL_BLOCK_SW;
257 }
258 rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
259 rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
260 spin_unlock_irqrestore(&rfkill->lock, flags);
261
262 rfkill_led_trigger_event(rfkill);
263 rfkill_uevent(rfkill);
264}
265
/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is claimed by userspace (in which case,
 * that switch is left alone) or suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_global_states[type].cur = blocked;
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}
289
/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}
309
/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	mutex_lock(&rfkill_global_mutex);

	/* lock out all further state changes until the EPO is lifted */
	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	/* save pre-EPO state in .def so rfkill_restore_states() can undo */
	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].def = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}
	mutex_unlock(&rfkill_global_mutex);
}
336
/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global state from the
 * states in rfkill_default_states. This can undo the effects of
 * a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	mutex_lock(&rfkill_global_mutex);

	/* lift the EPO lock first so __rfkill_switch_all() takes effect */
	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].def);
	mutex_unlock(&rfkill_global_mutex);
}
355
/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}
368
/**
 * rfkill_is_epo_lock_active - returns true if EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	/* lockless read; callers only need a snapshot */
	return rfkill_epo_lock_active;
}
382
/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	/* lockless read; callers only need a snapshot */
	return rfkill_global_states[type].cur;
}
394
/*
 * Set the global default/current state for @type, once.  The first
 * caller (or the first registration of that type) locks the default;
 * later calls are ignored.  Unblocking is refused while EPO is active.
 */
void rfkill_set_global_sw_state(const enum rfkill_type type, bool blocked)
{
	mutex_lock(&rfkill_global_mutex);

	/* don't allow unblock when epo */
	if (rfkill_epo_lock_active && !blocked)
		goto out;

	/* too late */
	if (rfkill_states_default_locked & BIT(type))
		goto out;

	rfkill_states_default_locked |= BIT(type);

	rfkill_global_states[type].cur = blocked;
	rfkill_global_states[type].def = blocked;
 out:
	mutex_unlock(&rfkill_global_mutex);
}
EXPORT_SYMBOL(rfkill_set_global_sw_state);
415
416
/*
 * Driver-facing API: report the hardware block state.  Returns whether
 * the device is now blocked for any reason.  Schedules a uevent (from
 * process context) if the HW bit changed on a registered device.
 */
bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	bool ret, change;

	ret = __rfkill_set_hw_state(rfkill, blocked, &change);

	if (!rfkill->registered)
		return ret;

	if (change)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);
432
433static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
434{
435 u32 bit = RFKILL_BLOCK_SW;
436
437 /* if in a ops->set_block right now, use other bit */
438 if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
439 bit = RFKILL_BLOCK_SW_PREV;
440
441 if (blocked)
442 rfkill->state |= bit;
443 else
444 rfkill->state &= ~bit;
445}
446
/*
 * Driver-facing API: report the software block state.  Returns whether
 * the device is now blocked for any reason (sw or hw).  Notifies
 * userspace only when the combined state changed and no hardware block
 * masks the change.
 */
bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	/* report "blocked" if blocked by either software or hardware */
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
472
/*
 * Driver-facing API: report both software and hardware block state at
 * once.  Note only the SW bit is written here; the HW value is used
 * solely to decide whether a uevent is due (the driver is expected to
 * keep HW state current via rfkill_set_hw_state() as well).
 */
void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return;

	if (swprev != sw || hwprev != hw)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);
}
EXPORT_SYMBOL(rfkill_set_states);
501
502static ssize_t rfkill_name_show(struct device *dev,
503 struct device_attribute *attr,
504 char *buf)
505{
506 struct rfkill *rfkill = to_rfkill(dev);
507
508 return sprintf(buf, "%s\n", rfkill->name);
509}
510
511static const char *rfkill_get_type_str(enum rfkill_type type)
512{
513 switch (type) {
514 case RFKILL_TYPE_WLAN:
515 return "wlan";
516 case RFKILL_TYPE_BLUETOOTH:
517 return "bluetooth";
518 case RFKILL_TYPE_UWB:
519 return "ultrawideband";
520 case RFKILL_TYPE_WIMAX:
521 return "wimax";
522 case RFKILL_TYPE_WWAN:
523 return "wwan";
524 default:
525 BUG();
526 }
527
528 BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1);
529}
530
531static ssize_t rfkill_type_show(struct device *dev,
532 struct device_attribute *attr,
533 char *buf)
534{
535 struct rfkill *rfkill = to_rfkill(dev);
536
537 return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
538}
539
540static u8 user_state_from_blocked(unsigned long state)
541{
542 if (state & RFKILL_BLOCK_HW)
543 return RFKILL_USER_STATE_HARD_BLOCKED;
544 if (state & RFKILL_BLOCK_SW)
545 return RFKILL_USER_STATE_SOFT_BLOCKED;
546
547 return RFKILL_USER_STATE_UNBLOCKED;
548}
549
550static ssize_t rfkill_state_show(struct device *dev,
551 struct device_attribute *attr,
552 char *buf)
553{
554 struct rfkill *rfkill = to_rfkill(dev);
555 unsigned long flags;
556 u32 state;
557
558 spin_lock_irqsave(&rfkill->lock, flags);
559 state = rfkill->state;
560 spin_unlock_irqrestore(&rfkill->lock, flags);
561
562 return sprintf(buf, "%d\n", user_state_from_blocked(state));
563}
564
565static ssize_t rfkill_state_store(struct device *dev,
566 struct device_attribute *attr,
567 const char *buf, size_t count)
568{
569 /*
570 * The intention was that userspace can only take control over
571 * a given device when/if rfkill-input doesn't control it due
572 * to user_claim. Since user_claim is currently unsupported,
573 * we never support changing the state from userspace -- this
574 * can be implemented again later.
575 */
576
577 return -EPERM;
578}
579
580static ssize_t rfkill_claim_show(struct device *dev,
581 struct device_attribute *attr,
582 char *buf)
583{
584 return sprintf(buf, "%d\n", 0);
585}
586
587static ssize_t rfkill_claim_store(struct device *dev,
588 struct device_attribute *attr,
589 const char *buf, size_t count)
590{
591 return -EOPNOTSUPP;
592}
593
/* Default sysfs attributes for every rfkill device. */
static struct device_attribute rfkill_dev_attrs[] = {
	__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
	__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
	__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
	__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
	__ATTR_NULL
};
601
/* Device-core release callback: last reference gone, free the rfkill. */
static void rfkill_release(struct device *dev)
{
	kfree(to_rfkill(dev));
}
608
/* Add RFKILL_NAME/TYPE/STATE variables to every uevent for this device. */
static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_get_type_str(rfkill->type));
	if (error)
		return error;
	/* snapshot state under the lock for a consistent value */
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	return error;
}
630
/*
 * Stop the periodic hardware poll; waits for an in-flight poll to finish.
 * No-op for devices whose ops lack a poll method.
 */
void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);
641
/*
 * Restart the periodic hardware poll after rfkill_pause_polling().
 * Schedules the poll work immediately (not after POLL_INTERVAL) so the
 * state is refreshed right away; the work re-arms itself afterwards.
 */
void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);
652
653static int rfkill_suspend(struct device *dev, pm_message_t state)
654{
655 struct rfkill *rfkill = to_rfkill(dev);
656
657 rfkill_pause_polling(rfkill);
658
659 rfkill->suspended = true;
660
661 return 0;
662}
663
/*
 * Class resume hook: re-apply the global state for this type (it may
 * have changed while we slept), re-enable uevents, notify userspace,
 * and restart polling.
 */
static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);

	rfkill->suspended = false;

	/* userspace may have missed changes while suspended */
	schedule_work(&rfkill->uevent_work);

	rfkill_resume_polling(rfkill);

	return 0;
}
682
/* The /sys/class/rfkill device class. */
static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_attrs	= rfkill_dev_attrs,
	.dev_uevent	= rfkill_dev_uevent,
	.suspend	= rfkill_suspend,
	.resume		= rfkill_resume,
};
691
692
/*
 * Allocate and initialize (but do not register) an rfkill structure.
 * @name and @ops must stay valid for the structure's lifetime; @ops
 * must provide set_block.  Returns NULL on invalid arguments or OOM.
 * Free with rfkill_destroy() (or register with rfkill_register()).
 */
struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	rfkill->name = name;
	rfkill->ops = ops;
	rfkill->data = ops_data;

	/* set up the embedded device; memory is now freed via put_device */
	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
733
/* Periodic poll work: ask the driver for fresh state, then re-arm. */
static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	schedule_delayed_work(&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}
750
751static void rfkill_uevent_work(struct work_struct *work)
752{
753 struct rfkill *rfkill;
754
755 rfkill = container_of(work, struct rfkill, uevent_work);
756
757 rfkill_uevent(rfkill);
758}
759
760static void rfkill_sync_work(struct work_struct *work)
761{
762 struct rfkill *rfkill;
763 bool cur;
764
765 rfkill = container_of(work, struct rfkill, sync_work);
766
767 mutex_lock(&rfkill_global_mutex);
768 cur = rfkill_global_states[rfkill->type].cur;
769 rfkill_set_block(rfkill, cur);
770 mutex_unlock(&rfkill_global_mutex);
771}
772
/*
 * Register an rfkill structure allocated with rfkill_alloc(): assign a
 * unique name, lock in the type's default state on first registration,
 * add the device to the class, set up the LED trigger and work items,
 * and kick off an asynchronous sync to the global state.  On failure
 * everything acquired so far is unwound via the goto chain.
 */
int __must_check rfkill_register(struct rfkill *rfkill)
{
	/* next device number; protected by rfkill_global_mutex */
	static unsigned long rfkill_no;
	struct device *dev = &rfkill->dev;
	int error;

	BUG_ON(!rfkill);

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	if (!(rfkill_states_default_locked & BIT(rfkill->type))) {
		/* first of its kind */
		BUILD_BUG_ON(NUM_RFKILL_TYPES >
			sizeof(rfkill_states_default_locked) * 8);
		rfkill_states_default_locked |= BIT(rfkill->type);
		rfkill_global_states[rfkill->type].cur =
			rfkill_global_states[rfkill->type].def;
	}

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	if (rfkill->ops->poll) {
		INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
		schedule_delayed_work(&rfkill->poll_work,
			round_jiffies_relative(POLL_INTERVAL));
	}

	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);

	/* apply the current global state asynchronously */
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
	schedule_work(&rfkill->sync_work);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);
835
/*
 * Undo rfkill_register(): stop polling, flush pending work, remove the
 * device and list entry, and tear down the LED trigger.  The structure
 * itself is freed later by rfkill_destroy().
 */
void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	/* clear before device_del so no further uevents are generated */
	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	list_del_init(&rfkill->node);
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);
857
858void rfkill_destroy(struct rfkill *rfkill)
859{
860 if (rfkill)
861 put_device(&rfkill->dev);
862}
863EXPORT_SYMBOL(rfkill_destroy);
864
865
/*
 * Module init: seed per-type defaults from the default_state module
 * parameter (param 1 = radio on = not blocked), register the class,
 * and optionally the input handler.
 */
static int __init rfkill_init(void)
{
	int error;
	int i;

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].def = !rfkill_default_state;

	error = class_register(&rfkill_class);
	if (error)
		goto out;

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error)
		class_unregister(&rfkill_class);
#endif

 out:
	return error;
}
subsys_initcall(rfkill_init);
888
889static void __exit rfkill_exit(void)
890{
891#ifdef CONFIG_RFKILL_INPUT
892 rfkill_handler_exit();
893#endif
894 class_unregister(&rfkill_class);
895}
896module_exit(rfkill_exit);