1 /*
2 * watchdog_dev.c
3 *
4 * (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
5 * All Rights Reserved.
6 *
7 * (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
8 *
9 *
10 * This source code is part of the generic code that can be used
11 * by all the watchdog timer drivers.
12 *
13 * This part of the generic code takes care of the following
14 * misc device: /dev/watchdog.
15 *
16 * Based on source code of the following authors:
17 * Matt Domsch <Matt_Domsch@dell.com>,
18 * Rob Radez <rob@osinvestor.com>,
19 * Rusty Lynch <rusty@linux.co.intel.com>
20 * Satyam Sharma <satyam@infradead.org>
21 * Randy Dunlap <randy.dunlap@oracle.com>
22 *
23 * This program is free software; you can redistribute it and/or
24 * modify it under the terms of the GNU General Public License
25 * as published by the Free Software Foundation; either version
26 * 2 of the License, or (at your option) any later version.
27 *
28 * Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
29 * admit liability nor provide warranty for any of this software.
30 * This material is provided "AS-IS" and at no charge.
31 */
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #include <linux/cdev.h> /* For character device */
36 #include <linux/errno.h> /* For the -ENODEV/... values */
37 #include <linux/fs.h> /* For file operations */
38 #include <linux/init.h> /* For __init/__exit/... */
39 #include <linux/jiffies.h> /* For timeout functions */
40 #include <linux/kernel.h> /* For printk/panic/... */
41 #include <linux/miscdevice.h> /* For handling misc devices */
42 #include <linux/module.h> /* For module stuff/... */
43 #include <linux/mutex.h> /* For mutexes */
44 #include <linux/slab.h> /* For memory functions */
45 #include <linux/types.h> /* For standard types (like size_t) */
46 #include <linux/watchdog.h> /* For watchdog specific items */
47 #include <linux/workqueue.h> /* For workqueue */
48 #include <linux/uaccess.h> /* For copy_to_user/put_user/... */
49
50 #include "watchdog_core.h"
51 #include "watchdog_pretimeout.h"
52
53 /*
54 * struct watchdog_core_data - watchdog core internal data
55 * @dev: The watchdog's internal device
56 * @cdev: The watchdog's Character device.
57 * @wdd: Pointer to watchdog device.
58 * @lock: Lock for watchdog core.
59 * @status: Watchdog core internal status bits.
60 */
61 struct watchdog_core_data {
62 struct device dev;
63 struct cdev cdev;
64 struct watchdog_device *wdd;
65 struct mutex lock;
66 unsigned long last_keepalive;
67 unsigned long last_hw_keepalive;
68 struct delayed_work work;
69 unsigned long status; /* Internal status bits */
70 #define _WDOG_DEV_OPEN 0 /* Opened ? */
71 #define _WDOG_ALLOW_RELEASE 1 /* Did we receive the magic char ? */
72 #define _WDOG_KEEPALIVE 2 /* Did we receive a keepalive ? */
73 };
74
75 /* the dev_t structure to store the dynamically allocated watchdog devices */
76 static dev_t watchdog_devt;
77 /* Reference to watchdog device behind /dev/watchdog */
78 static struct watchdog_core_data *old_wd_data;
79
80 static struct workqueue_struct *watchdog_wq;
81
82 static bool handle_boot_enabled =
83 IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED);
84
85 static inline bool watchdog_need_worker(struct watchdog_device *wdd)
86 {
87 /* All variables in milliseconds */
88 unsigned int hm = wdd->max_hw_heartbeat_ms;
89 unsigned int t = wdd->timeout * 1000;
90
91 /*
92 * A worker to generate heartbeat requests is needed if all of the
93 * following conditions are true.
94 * - Userspace activated the watchdog.
95 * - The driver provided a value for the maximum hardware timeout, and
96 * thus is aware that the framework supports generating heartbeat
97 * requests.
98 * - Userspace requests a longer timeout than the hardware can handle.
99 *
100 * Alternatively, if userspace has not opened the watchdog
101 * device, we take care of feeding the watchdog if it is
102 * running.
103 */
104 return (hm && watchdog_active(wdd) && t > hm) ||
105 (t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
106 }
107
108 static long watchdog_next_keepalive(struct watchdog_device *wdd)
109 {
110 struct watchdog_core_data *wd_data = wdd->wd_data;
111 unsigned int timeout_ms = wdd->timeout * 1000;
112 unsigned long keepalive_interval;
113 unsigned long last_heartbeat;
114 unsigned long virt_timeout;
115 unsigned int hw_heartbeat_ms;
116
117 virt_timeout = wd_data->last_keepalive + msecs_to_jiffies(timeout_ms);
118 hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms);
119 keepalive_interval = msecs_to_jiffies(hw_heartbeat_ms / 2);
120
121 if (!watchdog_active(wdd))
122 return keepalive_interval;
123
124 /*
125 * To ensure that the watchdog times out wdd->timeout seconds
126 * after the most recent ping from userspace, the last
127 * worker ping has to come in hw_heartbeat_ms before this timeout.
128 */
129 last_heartbeat = virt_timeout - msecs_to_jiffies(hw_heartbeat_ms);
130 return min_t(long, last_heartbeat - jiffies, keepalive_interval);
131 }
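
/*
 * Worked example of the arithmetic above (figures chosen purely for
 * illustration): with wdd->timeout = 60 and wdd->max_hw_heartbeat_ms =
 * 8000, hw_heartbeat_ms is 8000, so while the watchdog is active the
 * worker pings the hardware at most every 4 seconds, and the last
 * worker ping is scheduled no later than 8 seconds before the 60
 * second virtual timeout expires. If the watchdog is not active but
 * the hardware is running, the fixed 4 second interval is returned.
 */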
132
133 static inline void watchdog_update_worker(struct watchdog_device *wdd)
134 {
135 struct watchdog_core_data *wd_data = wdd->wd_data;
136
137 if (watchdog_need_worker(wdd)) {
138 long t = watchdog_next_keepalive(wdd);
139
140 if (t > 0)
141 mod_delayed_work(watchdog_wq, &wd_data->work, t);
142 } else {
143 cancel_delayed_work(&wd_data->work);
144 }
145 }
146
147 static int __watchdog_ping(struct watchdog_device *wdd)
148 {
149 struct watchdog_core_data *wd_data = wdd->wd_data;
150 unsigned long earliest_keepalive = wd_data->last_hw_keepalive +
151 msecs_to_jiffies(wdd->min_hw_heartbeat_ms);
152 int err;
153
154 if (time_is_after_jiffies(earliest_keepalive)) {
155 mod_delayed_work(watchdog_wq, &wd_data->work,
156 earliest_keepalive - jiffies);
157 return 0;
158 }
159
160 wd_data->last_hw_keepalive = jiffies;
161
162 if (wdd->ops->ping)
163 err = wdd->ops->ping(wdd); /* ping the watchdog */
164 else
165 err = wdd->ops->start(wdd); /* restart watchdog */
166
167 watchdog_update_worker(wdd);
168
169 return err;
170 }
171
172 /*
173 * watchdog_ping: ping the watchdog.
174 * @wdd: the watchdog device to ping
175 *
176 * The caller must hold wd_data->lock.
177 *
178 * If the watchdog has no ping operation of its own then it needs to be
179 * restarted via the start operation. This wrapper function does
180 * exactly that.
181 * We only ping when the watchdog device is running.
182 */
183
184 static int watchdog_ping(struct watchdog_device *wdd)
185 {
186 struct watchdog_core_data *wd_data = wdd->wd_data;
187
188 if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
189 return 0;
190
191 set_bit(_WDOG_KEEPALIVE, &wd_data->status);
192
193 wd_data->last_keepalive = jiffies;
194 return __watchdog_ping(wdd);
195 }
196
197 static bool watchdog_worker_should_ping(struct watchdog_core_data *wd_data)
198 {
199 struct watchdog_device *wdd = wd_data->wdd;
200
201 return wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd));
202 }
203
204 static void watchdog_ping_work(struct work_struct *work)
205 {
206 struct watchdog_core_data *wd_data;
207
208 wd_data = container_of(to_delayed_work(work), struct watchdog_core_data,
209 work);
210
211 mutex_lock(&wd_data->lock);
212 if (watchdog_worker_should_ping(wd_data))
213 __watchdog_ping(wd_data->wdd);
214 mutex_unlock(&wd_data->lock);
215 }
216
217 /*
218 * watchdog_start: wrapper to start the watchdog.
219 * @wdd: the watchdog device to start
220 *
221 * The caller must hold wd_data->lock.
222 *
223 * Start the watchdog if it is not active and mark it active.
224 * This function returns zero on success or a negative errno code for
225 * failure.
226 */
227
228 static int watchdog_start(struct watchdog_device *wdd)
229 {
230 struct watchdog_core_data *wd_data = wdd->wd_data;
231 unsigned long started_at;
232 int err;
233
234 if (watchdog_active(wdd))
235 return 0;
236
237 set_bit(_WDOG_KEEPALIVE, &wd_data->status);
238
239 started_at = jiffies;
240 if (watchdog_hw_running(wdd) && wdd->ops->ping)
241 err = wdd->ops->ping(wdd);
242 else
243 err = wdd->ops->start(wdd);
244 if (err == 0) {
245 set_bit(WDOG_ACTIVE, &wdd->status);
246 wd_data->last_keepalive = started_at;
247 watchdog_update_worker(wdd);
248 }
249
250 return err;
251 }
252
253 /*
254 * watchdog_stop: wrapper to stop the watchdog.
255 * @wdd: the watchdog device to stop
256 *
257 * The caller must hold wd_data->lock.
258 *
259 * Stop the watchdog if it is still active and unmark it active.
260 * This function returns zero on success or a negative errno code for
261 * failure.
262 * If the 'nowayout' feature was set, the watchdog cannot be stopped.
263 */
264
265 static int watchdog_stop(struct watchdog_device *wdd)
266 {
267 int err = 0;
268
269 if (!watchdog_active(wdd))
270 return 0;
271
272 if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
273 pr_info("watchdog%d: nowayout prevents watchdog being stopped!\n",
274 wdd->id);
275 return -EBUSY;
276 }
277
278 if (wdd->ops->stop) {
279 clear_bit(WDOG_HW_RUNNING, &wdd->status);
280 err = wdd->ops->stop(wdd);
281 } else {
282 set_bit(WDOG_HW_RUNNING, &wdd->status);
283 }
284
285 if (err == 0) {
286 clear_bit(WDOG_ACTIVE, &wdd->status);
287 watchdog_update_worker(wdd);
288 }
289
290 return err;
291 }
292
293 /*
294 * watchdog_get_status: wrapper to get the watchdog status
295 * @wdd: the watchdog device to get the status from
296 *
297 * The caller must hold wd_data->lock.
298 *
299 * Get the watchdog's status flags.
300 */
301
302 static unsigned int watchdog_get_status(struct watchdog_device *wdd)
303 {
304 struct watchdog_core_data *wd_data = wdd->wd_data;
305 unsigned int status;
306
307 if (wdd->ops->status)
308 status = wdd->ops->status(wdd);
309 else
310 status = wdd->bootstatus & (WDIOF_CARDRESET |
311 WDIOF_OVERHEAT |
312 WDIOF_FANFAULT |
313 WDIOF_EXTERN1 |
314 WDIOF_EXTERN2 |
315 WDIOF_POWERUNDER |
316 WDIOF_POWEROVER);
317
318 if (test_bit(_WDOG_ALLOW_RELEASE, &wd_data->status))
319 status |= WDIOF_MAGICCLOSE;
320
321 if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status))
322 status |= WDIOF_KEEPALIVEPING;
323
324 return status;
325 }
326
327 /*
328 * watchdog_set_timeout: set the watchdog timer timeout
329 * @wdd: the watchdog device to set the timeout for
330 * @timeout: timeout to set in seconds
331 *
332 * The caller must hold wd_data->lock.
333 */
334
335 static int watchdog_set_timeout(struct watchdog_device *wdd,
336 unsigned int timeout)
337 {
338 int err = 0;
339
340 if (!(wdd->info->options & WDIOF_SETTIMEOUT))
341 return -EOPNOTSUPP;
342
343 if (watchdog_timeout_invalid(wdd, timeout))
344 return -EINVAL;
345
346 if (wdd->ops->set_timeout) {
347 err = wdd->ops->set_timeout(wdd, timeout);
348 } else {
349 wdd->timeout = timeout;
350 /* Disable pretimeout if it doesn't fit the new timeout */
351 if (wdd->pretimeout >= wdd->timeout)
352 wdd->pretimeout = 0;
353 }
354
355 watchdog_update_worker(wdd);
356
357 return err;
358 }
359
360 /*
361 * watchdog_set_pretimeout: set the watchdog timer pretimeout
362 * @wdd: the watchdog device to set the timeout for
363 * @timeout: pretimeout to set in seconds
364 */
365
366 static int watchdog_set_pretimeout(struct watchdog_device *wdd,
367 unsigned int timeout)
368 {
369 int err = 0;
370
371 if (!(wdd->info->options & WDIOF_PRETIMEOUT))
372 return -EOPNOTSUPP;
373
374 if (watchdog_pretimeout_invalid(wdd, timeout))
375 return -EINVAL;
376
377 if (wdd->ops->set_pretimeout)
378 err = wdd->ops->set_pretimeout(wdd, timeout);
379 else
380 wdd->pretimeout = timeout;
381
382 return err;
383 }
384
385 /*
386 * watchdog_get_timeleft: wrapper to get the time left before a reboot
387 * @wdd: the watchdog device to get the remaining time from
388 * @timeleft: the time that's left
389 *
390 * The caller must hold wd_data->lock.
391 *
392 * Get the time before a watchdog will reboot (if not pinged).
393 */
394
395 static int watchdog_get_timeleft(struct watchdog_device *wdd,
396 unsigned int *timeleft)
397 {
398 *timeleft = 0;
399
400 if (!wdd->ops->get_timeleft)
401 return -EOPNOTSUPP;
402
403 *timeleft = wdd->ops->get_timeleft(wdd);
404
405 return 0;
406 }
407
408 #ifdef CONFIG_WATCHDOG_SYSFS
409 static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
410 char *buf)
411 {
412 struct watchdog_device *wdd = dev_get_drvdata(dev);
413
414 return sprintf(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT, &wdd->status));
415 }
416 static DEVICE_ATTR_RO(nowayout);
417
418 static ssize_t status_show(struct device *dev, struct device_attribute *attr,
419 char *buf)
420 {
421 struct watchdog_device *wdd = dev_get_drvdata(dev);
422 struct watchdog_core_data *wd_data = wdd->wd_data;
423 unsigned int status;
424
425 mutex_lock(&wd_data->lock);
426 status = watchdog_get_status(wdd);
427 mutex_unlock(&wd_data->lock);
428
429 return sprintf(buf, "0x%x\n", status);
430 }
431 static DEVICE_ATTR_RO(status);
432
433 static ssize_t bootstatus_show(struct device *dev,
434 struct device_attribute *attr, char *buf)
435 {
436 struct watchdog_device *wdd = dev_get_drvdata(dev);
437
438 return sprintf(buf, "%u\n", wdd->bootstatus);
439 }
440 static DEVICE_ATTR_RO(bootstatus);
441
442 static ssize_t timeleft_show(struct device *dev, struct device_attribute *attr,
443 char *buf)
444 {
445 struct watchdog_device *wdd = dev_get_drvdata(dev);
446 struct watchdog_core_data *wd_data = wdd->wd_data;
447 ssize_t status;
448 unsigned int val;
449
450 mutex_lock(&wd_data->lock);
451 status = watchdog_get_timeleft(wdd, &val);
452 mutex_unlock(&wd_data->lock);
453 if (!status)
454 status = sprintf(buf, "%u\n", val);
455
456 return status;
457 }
458 static DEVICE_ATTR_RO(timeleft);
459
460 static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
461 char *buf)
462 {
463 struct watchdog_device *wdd = dev_get_drvdata(dev);
464
465 return sprintf(buf, "%u\n", wdd->timeout);
466 }
467 static DEVICE_ATTR_RO(timeout);
468
469 static ssize_t pretimeout_show(struct device *dev,
470 struct device_attribute *attr, char *buf)
471 {
472 struct watchdog_device *wdd = dev_get_drvdata(dev);
473
474 return sprintf(buf, "%u\n", wdd->pretimeout);
475 }
476 static DEVICE_ATTR_RO(pretimeout);
477
478 static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
479 char *buf)
480 {
481 struct watchdog_device *wdd = dev_get_drvdata(dev);
482
483 return sprintf(buf, "%s\n", wdd->info->identity);
484 }
485 static DEVICE_ATTR_RO(identity);
486
487 static ssize_t state_show(struct device *dev, struct device_attribute *attr,
488 char *buf)
489 {
490 struct watchdog_device *wdd = dev_get_drvdata(dev);
491
492 if (watchdog_active(wdd))
493 return sprintf(buf, "active\n");
494
495 return sprintf(buf, "inactive\n");
496 }
497 static DEVICE_ATTR_RO(state);
498
499 static ssize_t pretimeout_available_governors_show(struct device *dev,
500 struct device_attribute *attr, char *buf)
501 {
502 return watchdog_pretimeout_available_governors_get(buf);
503 }
504 static DEVICE_ATTR_RO(pretimeout_available_governors);
505
506 static ssize_t pretimeout_governor_show(struct device *dev,
507 struct device_attribute *attr,
508 char *buf)
509 {
510 struct watchdog_device *wdd = dev_get_drvdata(dev);
511
512 return watchdog_pretimeout_governor_get(wdd, buf);
513 }
514
515 static ssize_t pretimeout_governor_store(struct device *dev,
516 struct device_attribute *attr,
517 const char *buf, size_t count)
518 {
519 struct watchdog_device *wdd = dev_get_drvdata(dev);
520 int ret = watchdog_pretimeout_governor_set(wdd, buf);
521
522 if (!ret)
523 ret = count;
524
525 return ret;
526 }
527 static DEVICE_ATTR_RW(pretimeout_governor);
528
529 static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
530 int n)
531 {
532 struct device *dev = container_of(kobj, struct device, kobj);
533 struct watchdog_device *wdd = dev_get_drvdata(dev);
534 umode_t mode = attr->mode;
535
536 if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
537 mode = 0;
538 else if (attr == &dev_attr_pretimeout.attr &&
539 !(wdd->info->options & WDIOF_PRETIMEOUT))
540 mode = 0;
541 else if ((attr == &dev_attr_pretimeout_governor.attr ||
542 attr == &dev_attr_pretimeout_available_governors.attr) &&
543 (!(wdd->info->options & WDIOF_PRETIMEOUT) ||
544 !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
545 mode = 0;
546
547 return mode;
548 }
549 static struct attribute *wdt_attrs[] = {
550 &dev_attr_state.attr,
551 &dev_attr_identity.attr,
552 &dev_attr_timeout.attr,
553 &dev_attr_pretimeout.attr,
554 &dev_attr_timeleft.attr,
555 &dev_attr_bootstatus.attr,
556 &dev_attr_status.attr,
557 &dev_attr_nowayout.attr,
558 &dev_attr_pretimeout_governor.attr,
559 &dev_attr_pretimeout_available_governors.attr,
560 NULL,
561 };
562
563 static const struct attribute_group wdt_group = {
564 .attrs = wdt_attrs,
565 .is_visible = wdt_is_visible,
566 };
567 __ATTRIBUTE_GROUPS(wdt);
568 #else
569 #define wdt_groups NULL
570 #endif
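
/*
 * With CONFIG_WATCHDOG_SYSFS enabled, the attributes above appear
 * under /sys/class/watchdog/watchdog<n>/. A minimal userspace sketch
 * (illustration only, not part of this file) that reads the configured
 * timeout of the first watchdog:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[16] = "";
 *		int fd = open("/sys/class/watchdog/watchdog0/timeout",
 *			      O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (read(fd, buf, sizeof(buf) - 1) > 0)
 *			printf("timeout: %s", buf);	// value ends in '\n'
 *		close(fd);
 *		return 0;
 *	}
 */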
571
572 /*
573 * watchdog_ioctl_op: call the watchdog drivers ioctl op if defined
574 * @wdd: the watchdog device to do the ioctl on
575 * @cmd: watchdog command
576 * @arg: argument pointer
577 *
578 * The caller must hold wd_data->lock.
579 */
580
581 static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
582 unsigned long arg)
583 {
584 if (!wdd->ops->ioctl)
585 return -ENOIOCTLCMD;
586
587 return wdd->ops->ioctl(wdd, cmd, arg);
588 }
589
590 /*
591 * watchdog_write: writes to the watchdog.
592 * @file: file from VFS
593 * @data: user address of data
594 * @len: length of data
595 * @ppos: pointer to the file offset
596 *
597 * A write to a watchdog device is defined as a keepalive ping.
598 * Writing the magic 'V' sequence allows the next close to turn
599 * off the watchdog (if 'nowayout' is not set).
600 */
601
602 static ssize_t watchdog_write(struct file *file, const char __user *data,
603 size_t len, loff_t *ppos)
604 {
605 struct watchdog_core_data *wd_data = file->private_data;
606 struct watchdog_device *wdd;
607 int err;
608 size_t i;
609 char c;
610
611 if (len == 0)
612 return 0;
613
614 /*
615 * Note: just in case someone wrote the magic character
616 * five months ago...
617 */
618 clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
619
620 /* scan to see whether or not we got the magic character */
621 for (i = 0; i != len; i++) {
622 if (get_user(c, data + i))
623 return -EFAULT;
624 if (c == 'V')
625 set_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
626 }
627
628 /* someone wrote to us, so we send the watchdog a keepalive ping */
629
630 err = -ENODEV;
631 mutex_lock(&wd_data->lock);
632 wdd = wd_data->wdd;
633 if (wdd)
634 err = watchdog_ping(wdd);
635 mutex_unlock(&wd_data->lock);
636
637 if (err < 0)
638 return err;
639
640 return len;
641 }
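
/*
 * A minimal userspace sketch of the write interface implemented above
 * (illustration only, not part of this file): any write counts as a
 * keepalive, and writing the magic character 'V' right before close()
 * lets watchdog_release() stop the watchdog, provided the driver
 * supports WDIOF_MAGICCLOSE and nowayout is not set.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/watchdog", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "k", 1);	// keepalive ping
 *		sleep(10);		// ... useful work ...
 *		write(fd, "V", 1);	// arm the magic close
 *		close(fd);		// watchdog may now be stopped
 *		return 0;
 *	}
 */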
642
643 /*
644 * watchdog_ioctl: handle the different ioctl's for the watchdog device.
645 * @file: file handle to the device
646 * @cmd: watchdog command
647 * @arg: argument pointer
648 *
649 * The watchdog API defines a common set of functions for all watchdogs
650 * according to their available features.
651 */
652
653 static long watchdog_ioctl(struct file *file, unsigned int cmd,
654 unsigned long arg)
655 {
656 struct watchdog_core_data *wd_data = file->private_data;
657 void __user *argp = (void __user *)arg;
658 struct watchdog_device *wdd;
659 int __user *p = argp;
660 unsigned int val;
661 int err;
662
663 mutex_lock(&wd_data->lock);
664
665 wdd = wd_data->wdd;
666 if (!wdd) {
667 err = -ENODEV;
668 goto out_ioctl;
669 }
670
671 err = watchdog_ioctl_op(wdd, cmd, arg);
672 if (err != -ENOIOCTLCMD)
673 goto out_ioctl;
674
675 switch (cmd) {
676 case WDIOC_GETSUPPORT:
677 err = copy_to_user(argp, wdd->info,
678 sizeof(struct watchdog_info)) ? -EFAULT : 0;
679 break;
680 case WDIOC_GETSTATUS:
681 val = watchdog_get_status(wdd);
682 err = put_user(val, p);
683 break;
684 case WDIOC_GETBOOTSTATUS:
685 err = put_user(wdd->bootstatus, p);
686 break;
687 case WDIOC_SETOPTIONS:
688 if (get_user(val, p)) {
689 err = -EFAULT;
690 break;
691 }
692 if (val & WDIOS_DISABLECARD) {
693 err = watchdog_stop(wdd);
694 if (err < 0)
695 break;
696 }
697 if (val & WDIOS_ENABLECARD)
698 err = watchdog_start(wdd);
699 break;
700 case WDIOC_KEEPALIVE:
701 if (!(wdd->info->options & WDIOF_KEEPALIVEPING)) {
702 err = -EOPNOTSUPP;
703 break;
704 }
705 err = watchdog_ping(wdd);
706 break;
707 case WDIOC_SETTIMEOUT:
708 if (get_user(val, p)) {
709 err = -EFAULT;
710 break;
711 }
712 err = watchdog_set_timeout(wdd, val);
713 if (err < 0)
714 break;
715 /* If the watchdog is active then we send a keepalive ping
716 * to make sure that the watchdog keeps running (and if
717 * possible that it takes the new timeout) */
718 err = watchdog_ping(wdd);
719 if (err < 0)
720 break;
721 /* fall through */
722 case WDIOC_GETTIMEOUT:
723 /* timeout == 0 means that we don't know the timeout */
724 if (wdd->timeout == 0) {
725 err = -EOPNOTSUPP;
726 break;
727 }
728 err = put_user(wdd->timeout, p);
729 break;
730 case WDIOC_GETTIMELEFT:
731 err = watchdog_get_timeleft(wdd, &val);
732 if (err < 0)
733 break;
734 err = put_user(val, p);
735 break;
736 case WDIOC_SETPRETIMEOUT:
737 if (get_user(val, p)) {
738 err = -EFAULT;
739 break;
740 }
741 err = watchdog_set_pretimeout(wdd, val);
742 break;
743 case WDIOC_GETPRETIMEOUT:
744 err = put_user(wdd->pretimeout, p);
745 break;
746 default:
747 err = -ENOTTY;
748 break;
749 }
750
751 out_ioctl:
752 mutex_unlock(&wd_data->lock);
753 return err;
754 }
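
/*
 * A minimal userspace sketch of the ioctl interface handled above
 * (illustration only, not part of this file), assuming the driver
 * advertises WDIOF_SETTIMEOUT and WDIOF_KEEPALIVEPING:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/watchdog.h>
 *
 *	int main(void)
 *	{
 *		int timeout = 60;	// requested timeout in seconds
 *		int fd = open("/dev/watchdog", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
 *			printf("effective timeout: %d s\n", timeout);
 *		ioctl(fd, WDIOC_KEEPALIVE, 0);	// explicit keepalive ping
 *		write(fd, "V", 1);		// allow close() to stop it
 *		close(fd);
 *		return 0;
 *	}
 */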
755
756 /*
757 * watchdog_open: open the /dev/watchdog* devices.
758 * @inode: inode of device
759 * @file: file handle to device
760 *
761 * When the /dev/watchdog* device gets opened, we start the watchdog.
762 * Watch out: the /dev/watchdog device is single open, so we make sure
763 * it can only be opened once.
764 */
765
766 static int watchdog_open(struct inode *inode, struct file *file)
767 {
768 struct watchdog_core_data *wd_data;
769 struct watchdog_device *wdd;
770 bool hw_running;
771 int err;
772
773 /* Get the corresponding watchdog device */
774 if (imajor(inode) == MISC_MAJOR)
775 wd_data = old_wd_data;
776 else
777 wd_data = container_of(inode->i_cdev, struct watchdog_core_data,
778 cdev);
779
780 /* the watchdog is single open! */
781 if (test_and_set_bit(_WDOG_DEV_OPEN, &wd_data->status))
782 return -EBUSY;
783
784 wdd = wd_data->wdd;
785
786 /*
787 * If the /dev/watchdog device is open, we don't want the module
788 * to be unloaded.
789 */
790 hw_running = watchdog_hw_running(wdd);
791 if (!hw_running && !try_module_get(wdd->ops->owner)) {
792 err = -EBUSY;
793 goto out_clear;
794 }
795
796 err = watchdog_start(wdd);
797 if (err < 0)
798 goto out_mod;
799
800 file->private_data = wd_data;
801
802 if (!hw_running)
803 get_device(&wd_data->dev);
804
805 /* /dev/watchdog is a virtual (and thus non-seekable) file */
806 return nonseekable_open(inode, file);
807
808 out_mod:
809 module_put(wd_data->wdd->ops->owner);
810 out_clear:
811 clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
812 return err;
813 }
814
815 static void watchdog_core_data_release(struct device *dev)
816 {
817 struct watchdog_core_data *wd_data;
818
819 wd_data = container_of(dev, struct watchdog_core_data, dev);
820
821 kfree(wd_data);
822 }
823
824 /*
825 * watchdog_release: release the watchdog device.
826 * @inode: inode of device
827 * @file: file handle to device
828 *
829 * This is the code for when /dev/watchdog gets closed. We will only
830 * stop the watchdog when we have received the magic char (and nowayout
831 * was not set), else the watchdog will keep running.
832 */
833
834 static int watchdog_release(struct inode *inode, struct file *file)
835 {
836 struct watchdog_core_data *wd_data = file->private_data;
837 struct watchdog_device *wdd;
838 int err = -EBUSY;
839 bool running;
840
841 mutex_lock(&wd_data->lock);
842
843 wdd = wd_data->wdd;
844 if (!wdd)
845 goto done;
846
847 /*
848 * We only stop the watchdog if we received the magic character
849 * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
850 * watchdog_stop will fail.
851 */
852 if (!test_bit(WDOG_ACTIVE, &wdd->status))
853 err = 0;
854 else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
855 !(wdd->info->options & WDIOF_MAGICCLOSE))
856 err = watchdog_stop(wdd);
857
858 /* If the watchdog was not stopped, send a keepalive ping */
859 if (err < 0) {
860 pr_crit("watchdog%d: watchdog did not stop!\n", wdd->id);
861 watchdog_ping(wdd);
862 }
863
864 watchdog_update_worker(wdd);
865
866 /* make sure that /dev/watchdog can be re-opened */
867 clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
868
869 done:
870 running = wdd && watchdog_hw_running(wdd);
871 mutex_unlock(&wd_data->lock);
872 /*
873 * Allow the owner module to be unloaded again unless the watchdog
874 * is still running. If the watchdog is still running, it can not
875 * be stopped, and its driver must not be unloaded.
876 */
877 if (!running) {
878 module_put(wd_data->cdev.owner);
879 put_device(&wd_data->dev);
880 }
881 return 0;
882 }
883
884 static const struct file_operations watchdog_fops = {
885 .owner = THIS_MODULE,
886 .write = watchdog_write,
887 .unlocked_ioctl = watchdog_ioctl,
888 .open = watchdog_open,
889 .release = watchdog_release,
890 };
891
892 static struct miscdevice watchdog_miscdev = {
893 .minor = WATCHDOG_MINOR,
894 .name = "watchdog",
895 .fops = &watchdog_fops,
896 };
897
898 static struct class watchdog_class = {
899 .name = "watchdog",
900 .owner = THIS_MODULE,
901 .dev_groups = wdt_groups,
902 };
903
904 /*
905 * watchdog_cdev_register: register watchdog character device
906 * @wdd: watchdog device
907 *
908 * Register a watchdog character device including handling the legacy
909 * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
910 * thus we set it up like that.
911 */
912
913 static int watchdog_cdev_register(struct watchdog_device *wdd)
914 {
915 struct watchdog_core_data *wd_data;
916 int err;
917
918 if (!watchdog_wq)
919 return -ENODEV;
920 
921 wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
922 if (!wd_data)
923 return -ENOMEM;
924 mutex_init(&wd_data->lock);
925 
926 wd_data->wdd = wdd;
927 wdd->wd_data = wd_data;
928 
929 INIT_DELAYED_WORK(&wd_data->work, watchdog_ping_work);
930
931 if (wdd->id == 0) {
932 old_wd_data = wd_data;
933 watchdog_miscdev.parent = wdd->parent;
934 err = misc_register(&watchdog_miscdev);
935 if (err != 0) {
936 pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
937 wdd->info->identity, WATCHDOG_MINOR, err);
938 if (err == -EBUSY)
939 pr_err("%s: a legacy watchdog module is probably present.\n",
940 wdd->info->identity);
941 old_wd_data = NULL;
942 kfree(wd_data);
943 return err;
944 }
945 }
946
947 device_initialize(&wd_data->dev);
948 wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
949 wd_data->dev.class = &watchdog_class;
950 wd_data->dev.parent = wdd->parent;
951 wd_data->dev.groups = wdd->groups;
952 wd_data->dev.release = watchdog_core_data_release;
953 dev_set_drvdata(&wd_data->dev, wdd);
954 dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
955
956 /* Fill in the data structures */
957 cdev_init(&wd_data->cdev, &watchdog_fops);
958
959 /* Add the device */
960 err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
961 if (err) {
962 pr_err("watchdog%d unable to add device %d:%d\n",
963 wdd->id, MAJOR(watchdog_devt), wdd->id);
964 if (wdd->id == 0) {
965 misc_deregister(&watchdog_miscdev);
966 old_wd_data = NULL;
967 put_device(&wd_data->dev);
968 }
969 return err;
970 }
971
972 wd_data->cdev.owner = wdd->ops->owner;
973
974 /* Record time of most recent heartbeat as 'just before now'. */
975 wd_data->last_hw_keepalive = jiffies - 1;
976
977 /*
978 * If the watchdog is running, prevent its driver from being unloaded,
979 * and schedule an immediate ping.
980 */
981 if (watchdog_hw_running(wdd)) {
982 __module_get(wdd->ops->owner);
983 get_device(&wd_data->dev);
984 if (handle_boot_enabled)
985 queue_delayed_work(watchdog_wq, &wd_data->work, 0);
986 else
987 pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
988 wdd->id);
989 }
990
991 return 0;
992 }
993
994 /*
995 * watchdog_cdev_unregister: unregister watchdog character device
996 * @watchdog: watchdog device
997 *
998 * Unregister watchdog character device and if needed the legacy
999 * /dev/watchdog device.
1000 */
1001
1002 static void watchdog_cdev_unregister(struct watchdog_device *wdd)
1003 {
1004 struct watchdog_core_data *wd_data = wdd->wd_data;
1005
1006 cdev_device_del(&wd_data->cdev, &wd_data->dev);
1007 if (wdd->id == 0) {
1008 misc_deregister(&watchdog_miscdev);
1009 old_wd_data = NULL;
1010 }
1011
1012 if (watchdog_active(wdd) &&
1013 test_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status)) {
1014 watchdog_stop(wdd);
1015 }
1016
1017 mutex_lock(&wd_data->lock);
1018 wd_data->wdd = NULL;
1019 wdd->wd_data = NULL;
1020 mutex_unlock(&wd_data->lock);
1021
1022 cancel_delayed_work_sync(&wd_data->work);
1023
1024 put_device(&wd_data->dev);
1025 }
1026
1027 /*
1028 * watchdog_dev_register: register a watchdog device
1029 * @wdd: watchdog device
1030 *
1031 * Register a watchdog device including handling the legacy
1032 * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
1033 * thus we set it up like that.
1034 */
1035
1036 int watchdog_dev_register(struct watchdog_device *wdd)
1037 {
1038 int ret;
1039
1040 ret = watchdog_cdev_register(wdd);
1041 if (ret)
1042 return ret;
1043
1044 ret = watchdog_register_pretimeout(wdd);
1045 if (ret)
1046 watchdog_cdev_unregister(wdd);
1047
1048 return ret;
1049 }
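
/*
 * Driver-side sketch (illustration only, not part of this file) of how
 * control reaches watchdog_dev_register(): a driver fills in a
 * watchdog_device and calls (devm_)watchdog_register_device() from
 * watchdog_core.c. The foo_* names are hypothetical.
 *
 *	#include <linux/module.h>
 *	#include <linux/watchdog.h>
 *
 *	static int foo_wdt_start(struct watchdog_device *wdd)
 *	{
 *		return 0;	// kick the (hypothetical) hardware here
 *	}
 *
 *	static int foo_wdt_stop(struct watchdog_device *wdd)
 *	{
 *		return 0;	// disable the (hypothetical) hardware here
 *	}
 *
 *	static const struct watchdog_info foo_wdt_info = {
 *		.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
 *			   WDIOF_MAGICCLOSE,
 *		.identity = "foo watchdog",
 *	};
 *
 *	static const struct watchdog_ops foo_wdt_ops = {
 *		.owner = THIS_MODULE,
 *		.start = foo_wdt_start,
 *		.stop = foo_wdt_stop,
 *	};
 *
 *	static struct watchdog_device foo_wdt_dev = {
 *		.info = &foo_wdt_info,
 *		.ops = &foo_wdt_ops,
 *		.timeout = 30,
 *		.max_hw_heartbeat_ms = 8000,
 *	};
 *
 *	// in the driver's probe():
 *	//	return devm_watchdog_register_device(dev, &foo_wdt_dev);
 */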
1050
1051 /*
1052 * watchdog_dev_unregister: unregister a watchdog device
1053 * @watchdog: watchdog device
1054 *
1055 * Unregister watchdog device and if needed the legacy
1056 * /dev/watchdog device.
1057 */
1058
1059 void watchdog_dev_unregister(struct watchdog_device *wdd)
1060 {
1061 watchdog_unregister_pretimeout(wdd);
1062 watchdog_cdev_unregister(wdd);
1063 }
1064
1065 /*
1066 * watchdog_dev_init: init dev part of watchdog core
1067 *
1068 * Allocate a range of chardev nodes to use for watchdog devices
1069 */
1070
1071 int __init watchdog_dev_init(void)
1072 {
1073 int err;
1074
1075 watchdog_wq = alloc_workqueue("watchdogd",
1076 WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
1077 if (!watchdog_wq) {
1078 pr_err("Failed to create watchdog workqueue\n");
1079 return -ENOMEM;
1080 }
1081
1082 err = class_register(&watchdog_class);
1083 if (err < 0) {
1084 pr_err("couldn't register class\n");
1085 goto err_register;
1086 }
1087
1088 err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
1089 if (err < 0) {
1090 pr_err("unable to allocate char dev region\n");
1091 goto err_alloc;
1092 }
1093
1094 return 0;
1095
1096 err_alloc:
1097 class_unregister(&watchdog_class);
1098 err_register:
1099 destroy_workqueue(watchdog_wq);
1100 return err;
1101 }
1102
1103 /*
1104 * watchdog_dev_exit: exit dev part of watchdog core
1105 *
1106 * Release the range of chardev nodes used for watchdog devices
1107 */
1108
1109 void __exit watchdog_dev_exit(void)
1110 {
1111 unregister_chrdev_region(watchdog_devt, MAX_DOGS);
1112 class_unregister(&watchdog_class);
1113 destroy_workqueue(watchdog_wq);
1114 }
1115
1116 module_param(handle_boot_enabled, bool, 0444);
1117 MODULE_PARM_DESC(handle_boot_enabled,
1118 "Watchdog core auto-updates boot enabled watchdogs before userspace takes over (default="
1119 __MODULE_STRING(IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) ")");
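
/*
 * Illustrative usage (assuming the core's module name is "watchdog",
 * i.e. KBUILD_MODNAME above): booting with
 * "watchdog.handle_boot_enabled=0" on the kernel command line keeps
 * the core from feeding a watchdog that firmware or the boot loader
 * left running, so userspace must open /dev/watchdog before the
 * hardware timeout expires.
 */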