block/genhd.c
3dcf60bc 1// SPDX-License-Identifier: GPL-2.0
1da177e4
LT
2/*
3 * gendisk handling
7b51e703
CH
4 *
5 * Portions Copyright (C) 2020 Christoph Hellwig
1da177e4
LT
6 */
7
1da177e4 8#include <linux/module.h>
3ad5cee5 9#include <linux/ctype.h>
1da177e4
LT
10#include <linux/fs.h>
11#include <linux/genhd.h>
b446b60e 12#include <linux/kdev_t.h>
1da177e4
LT
13#include <linux/kernel.h>
14#include <linux/blkdev.h>
66114cad 15#include <linux/backing-dev.h>
1da177e4
LT
16#include <linux/init.h>
17#include <linux/spinlock.h>
f500975a 18#include <linux/proc_fs.h>
1da177e4
LT
19#include <linux/seq_file.h>
20#include <linux/slab.h>
21#include <linux/kmod.h>
58383af6 22#include <linux/mutex.h>
bcce3de1 23#include <linux/idr.h>
77ea887e 24#include <linux/log2.h>
25e823c8 25#include <linux/pm_runtime.h>
99e6608c 26#include <linux/badblocks.h>
1da177e4 27
ff88972c
AB
28#include "blk.h"
29
31eb6186 30static struct kobject *block_depr;
1da177e4 31
22ae8ce8 32DECLARE_RWSEM(bdev_lookup_sem);
62b508f8 33
bcce3de1 34/* for extended dynamic devt allocation, currently only one major is used */
ce23bba8 35#define NR_EXT_DEVT (1 << MINORBITS)
22ae8ce8 36static DEFINE_IDA(ext_devt_ida);
bcce3de1 37
12c2bdb2
DB
38static void disk_check_events(struct disk_events *ev,
39 unsigned int *clearing_ptr);
9f53d2fe 40static void disk_alloc_events(struct gendisk *disk);
77ea887e
TH
41static void disk_add_events(struct gendisk *disk);
42static void disk_del_events(struct gendisk *disk);
43static void disk_release_events(struct gendisk *disk);
44
a782483c
CH
45void set_capacity(struct gendisk *disk, sector_t sectors)
46{
cb8432d6 47 struct block_device *bdev = disk->part0;
0fe37724 48 unsigned long flags;
a782483c 49
0fe37724 50 spin_lock_irqsave(&bdev->bd_size_lock, flags);
a782483c 51 i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
0fe37724 52 spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
a782483c
CH
53}
54EXPORT_SYMBOL(set_capacity);
55
e598a72f 56/*
449f4ec9
CH
57 * Set disk capacity and notify if the size is not currently zero and will not
58 * be set to zero. Returns true if a uevent was sent, otherwise false.
e598a72f 59 */
449f4ec9 60bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
e598a72f
BS
61{
62 sector_t capacity = get_capacity(disk);
a782483c 63 char *envp[] = { "RESIZE=1", NULL };
e598a72f
BS
64
65 set_capacity(disk, size);
e598a72f 66
a782483c
CH
67 /*
68 * Only print a message and send a uevent if the gendisk is user visible
69 * and alive. This avoids spamming the log and udev when setting the
70 * initial capacity during probing.
71 */
72 if (size == capacity ||
73 (disk->flags & (GENHD_FL_UP | GENHD_FL_HIDDEN)) != GENHD_FL_UP)
74 return false;
e598a72f 75
a782483c 76 pr_info("%s: detected capacity change from %lld to %lld\n",
2703fe1d 77 disk->disk_name, capacity, size);
7e890c37 78
a782483c
CH
79 /*
80 * Historically we did not send a uevent for changes to/from an empty
81 * device.
82 */
83 if (!capacity || !size)
84 return false;
85 kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
86 return true;
e598a72f 87}
449f4ec9 88EXPORT_SYMBOL_GPL(set_capacity_and_notify);
e598a72f 89
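/*
 * Illustration only, not part of genhd.c: a hedged sketch of how a driver
 * resize path might use the helper above.  The "example_" name is made up.
 */
#if 0
static void example_resize(struct gendisk *disk, sector_t new_sectors)
{
	/* The helper decides whether a RESIZE=1 uevent is warranted. */
	if (!set_capacity_and_notify(disk, new_sectors))
		pr_debug("%s: capacity updated, no uevent sent\n",
			 disk->disk_name);
}
#endif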
5cbd28e3
CH
90/*
91 * Format the device name of the indicated disk into the supplied buffer and
92 * return a pointer to that same buffer for convenience.
93 */
94char *disk_name(struct gendisk *hd, int partno, char *buf)
95{
96 if (!partno)
97 snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
98 else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
99 snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno);
100 else
101 snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno);
102
103 return buf;
104}
105
106const char *bdevname(struct block_device *bdev, char *buf)
107{
8a63a86e 108 return disk_name(bdev->bd_disk, bdev->bd_partno, buf);
5cbd28e3
CH
109}
110EXPORT_SYMBOL(bdevname);
e598a72f 111
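/*
 * Illustration only, not part of genhd.c: bdevname() formats into a
 * caller-provided buffer that must be at least BDEVNAME_SIZE bytes.
 * The "example_" name is made up.
 */
#if 0
static void example_log_bdev(struct block_device *bdev)
{
	char buf[BDEVNAME_SIZE];

	pr_info("using block device %s\n", bdevname(bdev, buf));
}
#endif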
0d02129e
CH
112static void part_stat_read_all(struct block_device *part,
113 struct disk_stats *stat)
ea18e0f0
KK
114{
115 int cpu;
116
117 memset(stat, 0, sizeof(struct disk_stats));
118 for_each_possible_cpu(cpu) {
0d02129e 119 struct disk_stats *ptr = per_cpu_ptr(part->bd_stats, cpu);
ea18e0f0
KK
120 int group;
121
122 for (group = 0; group < NR_STAT_GROUPS; group++) {
123 stat->nsecs[group] += ptr->nsecs[group];
124 stat->sectors[group] += ptr->sectors[group];
125 stat->ios[group] += ptr->ios[group];
126 stat->merges[group] += ptr->merges[group];
127 }
128
129 stat->io_ticks += ptr->io_ticks;
ea18e0f0
KK
130 }
131}
ea18e0f0 132
8446fe92 133static unsigned int part_in_flight(struct block_device *part)
f299b7c7 134{
b2f609e1 135 unsigned int inflight = 0;
1226b8dd 136 int cpu;
f299b7c7 137
1226b8dd 138 for_each_possible_cpu(cpu) {
e016b782
MP
139 inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
140 part_stat_local_read_cpu(part, in_flight[1], cpu);
1226b8dd 141 }
e016b782
MP
142 if ((int)inflight < 0)
143 inflight = 0;
1226b8dd 144
e016b782 145 return inflight;
f299b7c7
JA
146}
147
8446fe92
CH
148static void part_in_flight_rw(struct block_device *part,
149 unsigned int inflight[2])
bf0ddaba 150{
1226b8dd
MP
151 int cpu;
152
1226b8dd
MP
153 inflight[0] = 0;
154 inflight[1] = 0;
155 for_each_possible_cpu(cpu) {
156 inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);
157 inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);
158 }
159 if ((int)inflight[0] < 0)
160 inflight[0] = 0;
161 if ((int)inflight[1] < 0)
162 inflight[1] = 0;
bf0ddaba
OS
163}
164
8446fe92 165struct block_device *__disk_get_part(struct gendisk *disk, int partno)
807d4af2
CH
166{
167 struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
168
169 if (unlikely(partno < 0 || partno >= ptbl->len))
170 return NULL;
171 return rcu_dereference(ptbl->part[partno]);
172}
173
e71bf0d0
TH
174/**
175 * disk_part_iter_init - initialize partition iterator
176 * @piter: iterator to initialize
177 * @disk: disk to iterate over
178 * @flags: DISK_PITER_* flags
179 *
180 * Initialize @piter so that it iterates over partitions of @disk.
181 *
182 * CONTEXT:
183 * Don't care.
184 */
185void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
186 unsigned int flags)
187{
540eed56
TH
188 struct disk_part_tbl *ptbl;
189
190 rcu_read_lock();
191 ptbl = rcu_dereference(disk->part_tbl);
192
e71bf0d0
TH
193 piter->disk = disk;
194 piter->part = NULL;
195
196 if (flags & DISK_PITER_REVERSE)
540eed56 197 piter->idx = ptbl->len - 1;
71982a40 198 else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
e71bf0d0 199 piter->idx = 0;
b5d0b9df
TH
200 else
201 piter->idx = 1;
e71bf0d0
TH
202
203 piter->flags = flags;
540eed56
TH
204
205 rcu_read_unlock();
e71bf0d0
TH
206}
207EXPORT_SYMBOL_GPL(disk_part_iter_init);
208
209/**
210 * disk_part_iter_next - proceed iterator to the next partition and return it
211 * @piter: iterator of interest
212 *
213 * Proceed @piter to the next partition and return it.
214 *
215 * CONTEXT:
216 * Don't care.
217 */
ad1eaa53 218struct block_device *disk_part_iter_next(struct disk_part_iter *piter)
e71bf0d0 219{
540eed56 220 struct disk_part_tbl *ptbl;
e71bf0d0
TH
221 int inc, end;
222
223 /* put the last partition */
e79319af 224 disk_part_iter_exit(piter);
e71bf0d0 225
540eed56 226 /* get part_tbl */
e71bf0d0 227 rcu_read_lock();
540eed56 228 ptbl = rcu_dereference(piter->disk->part_tbl);
e71bf0d0
TH
229
230 /* determine iteration parameters */
231 if (piter->flags & DISK_PITER_REVERSE) {
232 inc = -1;
71982a40
TH
233 if (piter->flags & (DISK_PITER_INCL_PART0 |
234 DISK_PITER_INCL_EMPTY_PART0))
b5d0b9df
TH
235 end = -1;
236 else
237 end = 0;
e71bf0d0
TH
238 } else {
239 inc = 1;
540eed56 240 end = ptbl->len;
e71bf0d0
TH
241 }
242
243 /* iterate to the next partition */
244 for (; piter->idx != end; piter->idx += inc) {
8446fe92 245 struct block_device *part;
e71bf0d0 246
540eed56 247 part = rcu_dereference(ptbl->part[piter->idx]);
e71bf0d0
TH
248 if (!part)
249 continue;
aebf5db9
ML
250 piter->part = bdgrab(part);
251 if (!piter->part)
252 continue;
8446fe92 253 if (!bdev_nr_sectors(part) &&
71982a40
TH
254 !(piter->flags & DISK_PITER_INCL_EMPTY) &&
255 !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
aebf5db9
ML
256 piter->idx == 0)) {
257 bdput(piter->part);
258 piter->part = NULL;
e71bf0d0 259 continue;
aebf5db9 260 }
e71bf0d0 261
e71bf0d0
TH
262 piter->idx += inc;
263 break;
264 }
265
266 rcu_read_unlock();
267
268 return piter->part;
269}
270EXPORT_SYMBOL_GPL(disk_part_iter_next);
271
272/**
273 * disk_part_iter_exit - finish up partition iteration
274 * @piter: iter of interest
275 *
276 * Called when iteration is over. Cleans up @piter.
277 *
278 * CONTEXT:
279 * Don't care.
280 */
281void disk_part_iter_exit(struct disk_part_iter *piter)
282{
ad1eaa53
CH
283 if (piter->part)
284 bdput(piter->part);
e71bf0d0
TH
285 piter->part = NULL;
286}
287EXPORT_SYMBOL_GPL(disk_part_iter_exit);
288
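/*
 * Illustration only, not part of genhd.c: the typical init/next/exit
 * pattern for the partition iterator above, mirroring the callers later
 * in this file.  The "example_" name is made up.
 */
#if 0
static unsigned int example_count_partitions(struct gendisk *disk)
{
	struct disk_part_iter piter;
	struct block_device *part;
	unsigned int nr = 0;

	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
	while ((part = disk_part_iter_next(&piter)))
		nr++;
	disk_part_iter_exit(&piter);

	return nr;
}
#endif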
8446fe92 289static inline int sector_in_part(struct block_device *part, sector_t sector)
a6f23657 290{
8446fe92
CH
291 return part->bd_start_sect <= sector &&
292 sector < part->bd_start_sect + bdev_nr_sectors(part);
a6f23657
JA
293}
294
e71bf0d0
TH
295/**
296 * disk_map_sector_rcu - map sector to partition
297 * @disk: gendisk of interest
298 * @sector: sector to map
299 *
300 * Find out which partition @sector maps to on @disk. This is
301 * primarily used for stats accounting.
302 *
303 * CONTEXT:
cb8432d6 304 * RCU read locked.
e71bf0d0
TH
305 *
306 * RETURNS:
074a7aca 307 * Found partition on success, part0 is returned if no partition matches
b7d6c303 308 * or the matched partition is being deleted.
e71bf0d0 309 */
8446fe92 310struct block_device *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
e71bf0d0 311{
540eed56 312 struct disk_part_tbl *ptbl;
8446fe92 313 struct block_device *part;
e71bf0d0
TH
314 int i;
315
8ab1d40a 316 rcu_read_lock();
540eed56
TH
317 ptbl = rcu_dereference(disk->part_tbl);
318
a6f23657 319 part = rcu_dereference(ptbl->last_lookup);
cb8432d6 320 if (part && sector_in_part(part, sector))
8ab1d40a 321 goto out_unlock;
a6f23657 322
540eed56 323 for (i = 1; i < ptbl->len; i++) {
a6f23657 324 part = rcu_dereference(ptbl->part[i]);
a6f23657
JA
325 if (part && sector_in_part(part, sector)) {
326 rcu_assign_pointer(ptbl->last_lookup, part);
8ab1d40a 327 goto out_unlock;
a6f23657 328 }
e71bf0d0 329 }
8ab1d40a 330
8446fe92 331 part = disk->part0;
8ab1d40a
KK
332out_unlock:
333 rcu_read_unlock();
334 return part;
e71bf0d0 335}
d6d18d17 336EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
e71bf0d0 337
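/*
 * Illustration only, not part of genhd.c: the "RCU read locked" context
 * rule above in practice - the lookup and any use of the returned
 * partition pointer happen under rcu_read_lock().  The "example_" name
 * is made up.
 */
#if 0
static void example_note_partition(struct gendisk *disk, sector_t sector)
{
	struct block_device *part;

	rcu_read_lock();
	part = disk_map_sector_rcu(disk, sector);
	pr_debug("%s: sector %llu maps to partition %d\n",
		 disk->disk_name, (unsigned long long)sector,
		 part->bd_partno);
	rcu_read_unlock();
}
#endif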
b53df2e7
SK
338/**
339 * disk_has_partitions
340 * @disk: gendisk of interest
341 *
 342 * Walk through the partition table and check if a valid partition exists.
343 *
344 * CONTEXT:
345 * Don't care.
346 *
347 * RETURNS:
348 * True if the gendisk has at least one valid non-zero size partition.
349 * Otherwise false.
350 */
351bool disk_has_partitions(struct gendisk *disk)
352{
353 struct disk_part_tbl *ptbl;
354 int i;
355 bool ret = false;
356
357 rcu_read_lock();
358 ptbl = rcu_dereference(disk->part_tbl);
359
360 /* Iterate partitions skipping the whole device at index 0 */
361 for (i = 1; i < ptbl->len; i++) {
362 if (rcu_dereference(ptbl->part[i])) {
363 ret = true;
364 break;
365 }
366 }
367
368 rcu_read_unlock();
369
370 return ret;
371}
372EXPORT_SYMBOL_GPL(disk_has_partitions);
373
1da177e4
LT
374/*
375 * Can be deleted altogether. Later.
376 *
377 */
133d55cd 378#define BLKDEV_MAJOR_HASH_SIZE 255
1da177e4
LT
379static struct blk_major_name {
380 struct blk_major_name *next;
381 int major;
382 char name[16];
a160c615 383 void (*probe)(dev_t devt);
68eef3b4 384} *major_names[BLKDEV_MAJOR_HASH_SIZE];
e49fbbbf 385static DEFINE_MUTEX(major_names_lock);
1da177e4
LT
386
387/* index in the above - for now: assume no multimajor ranges */
e61eb2e9 388static inline int major_to_index(unsigned major)
1da177e4 389{
68eef3b4 390 return major % BLKDEV_MAJOR_HASH_SIZE;
7170be5f
NH
391}
392
68eef3b4 393#ifdef CONFIG_PROC_FS
cf771cb5 394void blkdev_show(struct seq_file *seqf, off_t offset)
7170be5f 395{
68eef3b4 396 struct blk_major_name *dp;
7170be5f 397
e49fbbbf 398 mutex_lock(&major_names_lock);
133d55cd
LG
399 for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
400 if (dp->major == offset)
cf771cb5 401 seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
e49fbbbf 402 mutex_unlock(&major_names_lock);
1da177e4 403}
68eef3b4 404#endif /* CONFIG_PROC_FS */
1da177e4 405
9e8c0bcc 406/**
e2b6b301 407 * __register_blkdev - register a new block device
9e8c0bcc 408 *
f33ff110
SB
409 * @major: the requested major device number [1..BLKDEV_MAJOR_MAX-1]. If
410 * @major = 0, try to allocate any unused major number.
9e8c0bcc 411 * @name: the name of the new block device as a zero terminated string
e2b6b301 412 * @probe: callback that is called on access to any minor number of @major
9e8c0bcc
MN
413 *
414 * The @name must be unique within the system.
415 *
0e056eb5
MCC
416 * The return value depends on the @major input parameter:
417 *
f33ff110
SB
418 * - if a major device number was requested in range [1..BLKDEV_MAJOR_MAX-1]
419 * then the function returns zero on success, or a negative error code
0e056eb5 420 * - if any unused major number was requested with @major = 0 parameter
9e8c0bcc 421 * then the return value is the allocated major number in range
f33ff110
SB
422 * [1..BLKDEV_MAJOR_MAX-1] or a negative error code otherwise
423 *
424 * See Documentation/admin-guide/devices.txt for the list of allocated
425 * major numbers.
e2b6b301
CH
426 *
427 * Use register_blkdev instead for any new code.
9e8c0bcc 428 */
a160c615
CH
429int __register_blkdev(unsigned int major, const char *name,
430 void (*probe)(dev_t devt))
1da177e4
LT
431{
432 struct blk_major_name **n, *p;
433 int index, ret = 0;
434
e49fbbbf 435 mutex_lock(&major_names_lock);
1da177e4
LT
436
437 /* temporary */
438 if (major == 0) {
439 for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
440 if (major_names[index] == NULL)
441 break;
442 }
443
444 if (index == 0) {
dfc76d11
KP
445 printk("%s: failed to get major for %s\n",
446 __func__, name);
1da177e4
LT
447 ret = -EBUSY;
448 goto out;
449 }
450 major = index;
451 ret = major;
452 }
453
133d55cd 454 if (major >= BLKDEV_MAJOR_MAX) {
dfc76d11
KP
455 pr_err("%s: major requested (%u) is greater than the maximum (%u) for %s\n",
456 __func__, major, BLKDEV_MAJOR_MAX-1, name);
133d55cd
LG
457
458 ret = -EINVAL;
459 goto out;
460 }
461
1da177e4
LT
462 p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
463 if (p == NULL) {
464 ret = -ENOMEM;
465 goto out;
466 }
467
468 p->major = major;
a160c615 469 p->probe = probe;
1da177e4
LT
470 strlcpy(p->name, name, sizeof(p->name));
471 p->next = NULL;
472 index = major_to_index(major);
473
474 for (n = &major_names[index]; *n; n = &(*n)->next) {
475 if ((*n)->major == major)
476 break;
477 }
478 if (!*n)
479 *n = p;
480 else
481 ret = -EBUSY;
482
483 if (ret < 0) {
f33ff110 484 printk("register_blkdev: cannot get major %u for %s\n",
1da177e4
LT
485 major, name);
486 kfree(p);
487 }
488out:
e49fbbbf 489 mutex_unlock(&major_names_lock);
1da177e4
LT
490 return ret;
491}
a160c615 492EXPORT_SYMBOL(__register_blkdev);
1da177e4 493
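/*
 * Illustration only, not part of genhd.c: a hedged sketch of registering a
 * dynamically allocated major together with a probe callback via
 * __register_blkdev().  All "example_" names are made up; new code that
 * does not need a probe callback would use register_blkdev() instead, as
 * noted above.
 */
#if 0
static void example_probe(dev_t devt)
{
	pr_info("probe requested for %u:%u\n", MAJOR(devt), MINOR(devt));
}

static int example_major;

static int __init example_init(void)
{
	example_major = __register_blkdev(0, "example", example_probe);
	if (example_major < 0)
		return example_major;
	return 0;
}

static void __exit example_exit(void)
{
	unregister_blkdev(example_major, "example");
}
#endif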
f4480240 494void unregister_blkdev(unsigned int major, const char *name)
1da177e4
LT
495{
496 struct blk_major_name **n;
497 struct blk_major_name *p = NULL;
498 int index = major_to_index(major);
1da177e4 499
e49fbbbf 500 mutex_lock(&major_names_lock);
1da177e4
LT
501 for (n = &major_names[index]; *n; n = &(*n)->next)
502 if ((*n)->major == major)
503 break;
294462a5
AM
504 if (!*n || strcmp((*n)->name, name)) {
505 WARN_ON(1);
294462a5 506 } else {
1da177e4
LT
507 p = *n;
508 *n = p->next;
509 }
e49fbbbf 510 mutex_unlock(&major_names_lock);
1da177e4 511 kfree(p);
1da177e4
LT
512}
513
514EXPORT_SYMBOL(unregister_blkdev);
515
870d6656
TH
516/**
517 * blk_mangle_minor - scatter minor numbers apart
518 * @minor: minor number to mangle
519 *
 520 * Scatter consecutively allocated @minor numbers apart if
 521 * CONFIG_DEBUG_BLOCK_EXT_DEVT is enabled. Mangling twice restores the original value.
522 *
523 * RETURNS:
524 * Mangled value.
525 *
526 * CONTEXT:
527 * Don't care.
528 */
529static int blk_mangle_minor(int minor)
530{
531#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
532 int i;
533
534 for (i = 0; i < MINORBITS / 2; i++) {
535 int low = minor & (1 << i);
536 int high = minor & (1 << (MINORBITS - 1 - i));
537 int distance = MINORBITS - 1 - 2 * i;
538
539 minor ^= low | high; /* clear both bits */
540 low <<= distance; /* swap the positions */
541 high >>= distance;
542 minor |= low | high; /* and set */
543 }
544#endif
545 return minor;
546}
547
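/*
 * Worked example (illustration only): with MINORBITS == 20 and
 * CONFIG_DEBUG_BLOCK_EXT_DEVT enabled, bit i is swapped with bit
 * (MINORBITS - 1 - i), so consecutive minors 0, 1, 2 become 0x00000,
 * 0x80000, 0x40000; applying blk_mangle_minor() twice restores the
 * original value.
 */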
bcce3de1 548/**
9fc995a6
CH
549 * blk_alloc_devt - allocate a dev_t for a block device
550 * @bdev: block device to allocate dev_t for
bcce3de1
TH
551 * @devt: out parameter for resulting dev_t
552 *
553 * Allocate a dev_t for block device.
554 *
555 * RETURNS:
556 * 0 on success, allocated dev_t is returned in *@devt. -errno on
557 * failure.
558 *
559 * CONTEXT:
560 * Might sleep.
561 */
9fc995a6 562int blk_alloc_devt(struct block_device *bdev, dev_t *devt)
bcce3de1 563{
9fc995a6 564 struct gendisk *disk = bdev->bd_disk;
bab998d6 565 int idx;
bcce3de1
TH
566
567 /* in consecutive minor range? */
9fc995a6
CH
568 if (bdev->bd_partno < disk->minors) {
569 *devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno);
bcce3de1
TH
570 return 0;
571 }
572
22ae8ce8 573 idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
bab998d6
TH
574 if (idx < 0)
575 return idx == -ENOSPC ? -EBUSY : idx;
bcce3de1 576
870d6656 577 *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
bcce3de1
TH
578 return 0;
579}
580
581/**
582 * blk_free_devt - free a dev_t
583 * @devt: dev_t to free
584 *
585 * Free @devt which was allocated using blk_alloc_devt().
586 *
587 * CONTEXT:
588 * Might sleep.
589 */
590void blk_free_devt(dev_t devt)
591{
22ae8ce8
CH
592 if (MAJOR(devt) == BLOCK_EXT_MAJOR)
593 ida_free(&ext_devt_ida, blk_mangle_minor(MINOR(devt)));
6fcc44d1
YY
594}
595
1f014290
TH
596static char *bdevt_str(dev_t devt, char *buf)
597{
598 if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
599 char tbuf[BDEVT_SIZE];
600 snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
601 snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
602 } else
603 snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));
604
605 return buf;
606}
607
9301fe73
CH
608static void disk_scan_partitions(struct gendisk *disk)
609{
610 struct block_device *bdev;
611
612 if (!get_capacity(disk) || !disk_part_scan_enabled(disk))
613 return;
614
615 set_bit(GD_NEED_PART_SCAN, &disk->state);
616 bdev = blkdev_get_by_dev(disk_devt(disk), FMODE_READ, NULL);
617 if (!IS_ERR(bdev))
618 blkdev_put(bdev, FMODE_READ);
619}
620
fef912bf
HR
621static void register_disk(struct device *parent, struct gendisk *disk,
622 const struct attribute_group **groups)
d2bf1b67
TH
623{
624 struct device *ddev = disk_to_dev(disk);
d2bf1b67 625 struct disk_part_iter piter;
ad1eaa53 626 struct block_device *part;
d2bf1b67
TH
627 int err;
628
e63a46be 629 ddev->parent = parent;
d2bf1b67 630
ffc8b308 631 dev_set_name(ddev, "%s", disk->disk_name);
d2bf1b67
TH
632
633 /* delay uevents, until we scanned partition table */
634 dev_set_uevent_suppress(ddev, 1);
635
fef912bf
HR
636 if (groups) {
637 WARN_ON(ddev->groups);
638 ddev->groups = groups;
639 }
d2bf1b67
TH
640 if (device_add(ddev))
641 return;
642 if (!sysfs_deprecated) {
643 err = sysfs_create_link(block_depr, &ddev->kobj,
644 kobject_name(&ddev->kobj));
645 if (err) {
646 device_del(ddev);
647 return;
648 }
649 }
25e823c8
ML
650
651 /*
652 * avoid probable deadlock caused by allocating memory with
 653 * GFP_KERNEL in the runtime_resume callback of all its ancestor
 654 * devices
655 */
656 pm_runtime_set_memalloc_noio(ddev, true);
657
cb8432d6
CH
658 disk->part0->bd_holder_dir =
659 kobject_create_and_add("holders", &ddev->kobj);
d2bf1b67
TH
660 disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
661
c8c5199a 662 if (disk->flags & GENHD_FL_HIDDEN)
8ddcd653 663 return;
8ddcd653 664
9301fe73 665 disk_scan_partitions(disk);
d2bf1b67 666
d2bf1b67
TH
667 /* announce disk after possible partitions are created */
668 dev_set_uevent_suppress(ddev, 0);
669 kobject_uevent(&ddev->kobj, KOBJ_ADD);
670
671 /* announce possible partitions */
672 disk_part_iter_init(&piter, disk, 0);
673 while ((part = disk_part_iter_next(&piter)))
ad1eaa53 674 kobject_uevent(bdev_kobj(part), KOBJ_ADD);
d2bf1b67 675 disk_part_iter_exit(&piter);
8ddcd653 676
4d7c1d3f 677 if (disk->queue->backing_dev_info->dev) {
678 err = sysfs_create_link(&ddev->kobj,
679 &disk->queue->backing_dev_info->dev->kobj,
680 "bdi");
681 WARN_ON(err);
682 }
d2bf1b67
TH
683}
684
1da177e4 685/**
fa70d2e2 686 * __device_add_disk - add disk information to kernel list
e63a46be 687 * @parent: parent device for the disk
1da177e4 688 * @disk: per-device partitioning information
fef912bf 689 * @groups: Additional per-device sysfs groups
fa70d2e2 690 * @register_queue: register the queue if set to true
1da177e4
LT
691 *
692 * This function registers the partitioning information in @disk
693 * with the kernel.
3e1a7ff8
TH
694 *
695 * FIXME: error handling
1da177e4 696 */
fa70d2e2 697static void __device_add_disk(struct device *parent, struct gendisk *disk,
fef912bf 698 const struct attribute_group **groups,
fa70d2e2 699 bool register_queue)
1da177e4 700{
3e1a7ff8 701 dev_t devt;
6ffeea77 702 int retval;
cf0ca9fe 703
737eb78e
DLM
704 /*
705 * The disk queue should now be all set with enough information about
706 * the device for the elevator code to pick an adequate default
707 * elevator if one is needed, that is, for devices requesting queue
708 * registration.
709 */
710 if (register_queue)
711 elevator_init_mq(disk->queue);
712
3e1a7ff8
TH
 713 /* minors == 0 indicates to use the extended devt allocation for
 714 * part0 and should be accompanied by the GENHD_FL_EXT_DEVT flag.
 715 * Make sure all parameters make sense.
716 */
717 WARN_ON(disk->minors && !(disk->major || disk->first_minor));
8ddcd653
CH
718 WARN_ON(!disk->minors &&
719 !(disk->flags & (GENHD_FL_EXT_DEVT | GENHD_FL_HIDDEN)));
3e1a7ff8 720
1da177e4 721 disk->flags |= GENHD_FL_UP;
3e1a7ff8 722
9fc995a6 723 retval = blk_alloc_devt(disk->part0, &devt);
3e1a7ff8
TH
724 if (retval) {
725 WARN_ON(1);
726 return;
727 }
3e1a7ff8
TH
728 disk->major = MAJOR(devt);
729 disk->first_minor = MINOR(devt);
730
9f53d2fe
SG
731 disk_alloc_events(disk);
732
8ddcd653
CH
733 if (disk->flags & GENHD_FL_HIDDEN) {
734 /*
735 * Don't let hidden disks show up in /proc/partitions,
736 * and don't bother scanning for partitions either.
737 */
738 disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
739 disk->flags |= GENHD_FL_NO_PART_SCAN;
740 } else {
3c5d202b
CH
741 struct backing_dev_info *bdi = disk->queue->backing_dev_info;
742 struct device *dev = disk_to_dev(disk);
3a92168b 743 int ret;
744
8ddcd653 745 /* Register BDI before referencing it from bdev */
3c5d202b
CH
746 dev->devt = devt;
747 ret = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
3a92168b 748 WARN_ON(ret);
3c5d202b 749 bdi_set_owner(bdi, dev);
cb8432d6 750 bdev_add(disk->part0, devt);
8ddcd653 751 }
fef912bf 752 register_disk(parent, disk, groups);
fa70d2e2
MS
753 if (register_queue)
754 blk_register_queue(disk);
cf0ca9fe 755
523e1d39
TH
756 /*
757 * Take an extra ref on queue which will be put on disk_release()
758 * so that it sticks around as long as @disk is there.
759 */
09ac46c4 760 WARN_ON_ONCE(!blk_get_queue(disk->queue));
523e1d39 761
77ea887e 762 disk_add_events(disk);
25520d55 763 blk_integrity_add(disk);
1da177e4 764}
fa70d2e2 765
fef912bf
HR
766void device_add_disk(struct device *parent, struct gendisk *disk,
767 const struct attribute_group **groups)
768
fa70d2e2 769{
fef912bf 770 __device_add_disk(parent, disk, groups, true);
fa70d2e2 771}
e63a46be 772EXPORT_SYMBOL(device_add_disk);
1da177e4 773
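/*
 * Illustration only, not part of genhd.c: a hedged, heavily simplified
 * sketch of the usual driver-side lifecycle around device_add_disk().
 * All "example_" names are made up, error handling is minimal, and the
 * request_queue setup/teardown is assumed to happen elsewhere.
 */
#if 0
static const struct block_device_operations example_fops = {
	.owner = THIS_MODULE,
};

static int example_attach(struct device *parent, struct request_queue *q,
			  int major)
{
	struct gendisk *disk;

	disk = alloc_disk(1);			/* whole-device minor only */
	if (!disk)
		return -ENOMEM;

	disk->major = major;			/* from register_blkdev() */
	disk->first_minor = 0;
	disk->fops = &example_fops;
	disk->queue = q;
	snprintf(disk->disk_name, sizeof(disk->disk_name), "example0");
	set_capacity(disk, 2048);		/* 1 MiB in 512-byte sectors */

	device_add_disk(parent, disk, NULL);	/* disk becomes live here */
	return 0;
}

static void example_detach(struct gendisk *disk)
{
	del_gendisk(disk);			/* tear down partitions/sysfs */
	put_disk(disk);				/* drop the final reference */
}
#endif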
fa70d2e2
MS
774void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
775{
fef912bf 776 __device_add_disk(parent, disk, NULL, false);
fa70d2e2
MS
777}
778EXPORT_SYMBOL(device_add_disk_no_queue_reg);
779
71773cf7 780static void invalidate_partition(struct block_device *bdev)
02d33b67 781{
02d33b67
CH
782 fsync_bdev(bdev);
783 __invalidate_device(bdev, true);
9bc5c397
CH
784
785 /*
22ae8ce8
CH
786 * Unhash the bdev inode for this device so that it can't be looked
787 * up any more even if openers still hold references to it.
9bc5c397
CH
788 */
789 remove_inode_hash(bdev->bd_inode);
02d33b67
CH
790}
791
b5bd357c
LC
792/**
793 * del_gendisk - remove the gendisk
794 * @disk: the struct gendisk to remove
795 *
796 * Removes the gendisk and all its associated resources. This deletes the
797 * partitions associated with the gendisk, and unregisters the associated
798 * request_queue.
799 *
 800 * This is the counterpart to the corresponding __device_add_disk() call.
801 *
802 * The final removal of the struct gendisk happens when its refcount reaches 0
803 * with put_disk(), which should be called after del_gendisk(), if
804 * __device_add_disk() was used.
e8c7d14a
LC
805 *
 806 * Drivers exist which depend on the release of the gendisk being
 807 * synchronous; it should not be deferred.
808 *
809 * Context: can sleep
b5bd357c 810 */
d2bf1b67 811void del_gendisk(struct gendisk *disk)
1da177e4 812{
d2bf1b67 813 struct disk_part_iter piter;
ad1eaa53 814 struct block_device *part;
d2bf1b67 815
e8c7d14a
LC
816 might_sleep();
817
6b3ba976
CH
818 if (WARN_ON_ONCE(!disk->queue))
819 return;
820
25520d55 821 blk_integrity_del(disk);
77ea887e
TH
822 disk_del_events(disk);
823
56c0908c
JK
824 /*
825 * Block lookups of the disk until all bdevs are unhashed and the
826 * disk is marked as dead (GENHD_FL_UP cleared).
827 */
22ae8ce8
CH
828 down_write(&bdev_lookup_sem);
829
d2bf1b67
TH
830 /* invalidate stuff */
831 disk_part_iter_init(&piter, disk,
832 DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
833 while ((part = disk_part_iter_next(&piter))) {
ad1eaa53 834 invalidate_partition(part);
0d02129e 835 delete_partition(part);
d2bf1b67
TH
836 }
837 disk_part_iter_exit(&piter);
838
71773cf7 839 invalidate_partition(disk->part0);
d2bf1b67
TH
840 set_capacity(disk, 0);
841 disk->flags &= ~GENHD_FL_UP;
22ae8ce8 842 up_write(&bdev_lookup_sem);
d2bf1b67 843
6b3ba976 844 if (!(disk->flags & GENHD_FL_HIDDEN)) {
8ddcd653 845 sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
6b3ba976 846
90f16fdd
JK
847 /*
848 * Unregister bdi before releasing device numbers (as they can
849 * get reused and we'd get clashes in sysfs).
850 */
6b3ba976 851 bdi_unregister(disk->queue->backing_dev_info);
90f16fdd 852 }
d2bf1b67 853
6b3ba976 854 blk_unregister_queue(disk);
d2bf1b67 855
cb8432d6 856 kobject_put(disk->part0->bd_holder_dir);
d2bf1b67 857 kobject_put(disk->slave_dir);
d2bf1b67 858
8446fe92 859 part_stat_set_all(disk->part0, 0);
cb8432d6 860 disk->part0->bd_stamp = 0;
d2bf1b67
TH
861 if (!sysfs_deprecated)
862 sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
25e823c8 863 pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
d2bf1b67 864 device_del(disk_to_dev(disk));
1da177e4 865}
d2bf1b67 866EXPORT_SYMBOL(del_gendisk);
1da177e4 867
99e6608c
VV
868/* sysfs access to bad-blocks list. */
869static ssize_t disk_badblocks_show(struct device *dev,
870 struct device_attribute *attr,
871 char *page)
872{
873 struct gendisk *disk = dev_to_disk(dev);
874
875 if (!disk->bb)
876 return sprintf(page, "\n");
877
878 return badblocks_show(disk->bb, page, 0);
879}
880
881static ssize_t disk_badblocks_store(struct device *dev,
882 struct device_attribute *attr,
883 const char *page, size_t len)
884{
885 struct gendisk *disk = dev_to_disk(dev);
886
887 if (!disk->bb)
888 return -ENXIO;
889
890 return badblocks_store(disk->bb, page, len, 0);
891}
892
22ae8ce8 893void blk_request_module(dev_t devt)
bd8eff3b 894{
a160c615
CH
895 unsigned int major = MAJOR(devt);
896 struct blk_major_name **n;
897
898 mutex_lock(&major_names_lock);
899 for (n = &major_names[major_to_index(major)]; *n; n = &(*n)->next) {
900 if ((*n)->major == major && (*n)->probe) {
901 (*n)->probe(devt);
902 mutex_unlock(&major_names_lock);
903 return;
904 }
905 }
906 mutex_unlock(&major_names_lock);
907
bd8eff3b
CH
908 if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
909 /* Make old-style 2.4 aliases work */
910 request_module("block-major-%d", MAJOR(devt));
911}
912
f331c029
TH
913/**
914 * bdget_disk - do bdget() by gendisk and partition number
915 * @disk: gendisk of interest
916 * @partno: partition number
917 *
918 * Find partition @partno from @disk, do bdget() on it.
919 *
920 * CONTEXT:
921 * Don't care.
922 *
923 * RETURNS:
924 * Resulting block_device on success, NULL on failure.
925 */
aeb3d3a8 926struct block_device *bdget_disk(struct gendisk *disk, int partno)
f331c029 927{
548b10eb 928 struct block_device *bdev = NULL;
f331c029 929
0d02129e
CH
930 rcu_read_lock();
931 bdev = __disk_get_part(disk, partno);
932 if (bdev && !bdgrab(bdev))
933 bdev = NULL;
934 rcu_read_unlock();
f331c029 935
548b10eb 936 return bdev;
f331c029 937}
f331c029 938
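/*
 * Illustration only, not part of genhd.c: bdget_disk() returns a
 * referenced block_device (or NULL), and the caller must drop that
 * reference with bdput().  The "example_" name is made up.
 */
#if 0
static void example_show_part_size(struct gendisk *disk, int partno)
{
	struct block_device *bdev = bdget_disk(disk, partno);

	if (bdev) {
		pr_info("%s: partition %d has %llu sectors\n",
			disk->disk_name, partno,
			(unsigned long long)bdev_nr_sectors(bdev));
		bdput(bdev);
	}
}
#endif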
5c6f35c5
GKH
939/*
940 * print a full list of all partitions - intended for places where the root
 941 * filesystem can't be mounted, to give the victim some idea of what
942 * went wrong
943 */
944void __init printk_all_partitions(void)
945{
def4e38d
TH
946 struct class_dev_iter iter;
947 struct device *dev;
948
949 class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
950 while ((dev = class_dev_iter_next(&iter))) {
951 struct gendisk *disk = dev_to_disk(dev);
e71bf0d0 952 struct disk_part_iter piter;
ad1eaa53 953 struct block_device *part;
1f014290
TH
954 char name_buf[BDEVNAME_SIZE];
955 char devt_buf[BDEVT_SIZE];
def4e38d
TH
956
957 /*
958 * Don't show empty devices or things that have been
25985edc 959 * suppressed
def4e38d
TH
960 */
961 if (get_capacity(disk) == 0 ||
962 (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
963 continue;
964
965 /*
966 * Note, unlike /proc/partitions, I am showing the
967 * numbers in hex - the same format as the root=
968 * option takes.
969 */
074a7aca
TH
970 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
971 while ((part = disk_part_iter_next(&piter))) {
ad1eaa53 972 bool is_part0 = part == disk->part0;
def4e38d 973
b5af921e 974 printk("%s%s %10llu %s %s", is_part0 ? "" : " ",
ad1eaa53
CH
975 bdevt_str(part->bd_dev, devt_buf),
976 bdev_nr_sectors(part) >> 1,
977 disk_name(disk, part->bd_partno, name_buf),
978 part->bd_meta_info ?
979 part->bd_meta_info->uuid : "");
074a7aca 980 if (is_part0) {
52c44d93 981 if (dev->parent && dev->parent->driver)
074a7aca 982 printk(" driver: %s\n",
52c44d93 983 dev->parent->driver->name);
074a7aca
TH
984 else
985 printk(" (driver?)\n");
986 } else
987 printk("\n");
988 }
e71bf0d0 989 disk_part_iter_exit(&piter);
def4e38d
TH
990 }
991 class_dev_iter_exit(&iter);
dd2a345f
DG
992}
993
1da177e4
LT
994#ifdef CONFIG_PROC_FS
995/* iterator */
def4e38d 996static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
68c4d4a7 997{
def4e38d
TH
998 loff_t skip = *pos;
999 struct class_dev_iter *iter;
1000 struct device *dev;
68c4d4a7 1001
aeb3d3a8 1002 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
def4e38d
TH
1003 if (!iter)
1004 return ERR_PTR(-ENOMEM);
1005
1006 seqf->private = iter;
1007 class_dev_iter_init(iter, &block_class, NULL, &disk_type);
1008 do {
1009 dev = class_dev_iter_next(iter);
1010 if (!dev)
1011 return NULL;
1012 } while (skip--);
1013
1014 return dev_to_disk(dev);
68c4d4a7
GKH
1015}
1016
def4e38d 1017static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
1da177e4 1018{
edfaa7c3 1019 struct device *dev;
1da177e4 1020
def4e38d
TH
1021 (*pos)++;
1022 dev = class_dev_iter_next(seqf->private);
2ac3cee5 1023 if (dev)
68c4d4a7 1024 return dev_to_disk(dev);
2ac3cee5 1025
1da177e4
LT
1026 return NULL;
1027}
1028
def4e38d 1029static void disk_seqf_stop(struct seq_file *seqf, void *v)
27f30251 1030{
def4e38d 1031 struct class_dev_iter *iter = seqf->private;
27f30251 1032
def4e38d
TH
1033 /* stop is called even after start failed :-( */
1034 if (iter) {
1035 class_dev_iter_exit(iter);
1036 kfree(iter);
77da1605 1037 seqf->private = NULL;
5c0ef6d0 1038 }
1da177e4
LT
1039}
1040
def4e38d 1041static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
1da177e4 1042{
06768067 1043 void *p;
def4e38d
TH
1044
1045 p = disk_seqf_start(seqf, pos);
b9f985b6 1046 if (!IS_ERR_OR_NULL(p) && !*pos)
def4e38d
TH
1047 seq_puts(seqf, "major minor #blocks name\n\n");
1048 return p;
1da177e4
LT
1049}
1050
cf771cb5 1051static int show_partition(struct seq_file *seqf, void *v)
1da177e4
LT
1052{
1053 struct gendisk *sgp = v;
e71bf0d0 1054 struct disk_part_iter piter;
ad1eaa53 1055 struct block_device *part;
1da177e4
LT
1056 char buf[BDEVNAME_SIZE];
1057
1da177e4 1058 /* Don't show non-partitionable removable devices or empty devices */
d27769ec 1059 if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
f331c029 1060 (sgp->flags & GENHD_FL_REMOVABLE)))
1da177e4
LT
1061 return 0;
1062 if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
1063 return 0;
1064
1065 /* show the full disk and all non-0 size partitions of it */
074a7aca 1066 disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0);
e71bf0d0 1067 while ((part = disk_part_iter_next(&piter)))
1f014290 1068 seq_printf(seqf, "%4d %7d %10llu %s\n",
ad1eaa53
CH
1069 MAJOR(part->bd_dev), MINOR(part->bd_dev),
1070 bdev_nr_sectors(part) >> 1,
1071 disk_name(sgp, part->bd_partno, buf));
e71bf0d0 1072 disk_part_iter_exit(&piter);
1da177e4
LT
1073
1074 return 0;
1075}
1076
f500975a 1077static const struct seq_operations partitions_op = {
def4e38d
TH
1078 .start = show_partition_start,
1079 .next = disk_seqf_next,
1080 .stop = disk_seqf_stop,
edfaa7c3 1081 .show = show_partition
1da177e4
LT
1082};
1083#endif
1084
1da177e4
LT
1085static int __init genhd_device_init(void)
1086{
e105b8bf
DW
1087 int error;
1088
1089 block_class.dev_kobj = sysfs_dev_block_kobj;
1090 error = class_register(&block_class);
ee27a558
RM
1091 if (unlikely(error))
1092 return error;
1da177e4 1093 blk_dev_init();
edfaa7c3 1094
561ec68e
ZY
1095 register_blkdev(BLOCK_EXT_MAJOR, "blkext");
1096
edfaa7c3 1097 /* create top-level block dir */
e52eec13
AK
1098 if (!sysfs_deprecated)
1099 block_depr = kobject_create_and_add("block", NULL);
830d3cfb 1100 return 0;
1da177e4
LT
1101}
1102
1103subsys_initcall(genhd_device_init);
1104
edfaa7c3
KS
1105static ssize_t disk_range_show(struct device *dev,
1106 struct device_attribute *attr, char *buf)
1da177e4 1107{
edfaa7c3 1108 struct gendisk *disk = dev_to_disk(dev);
1da177e4 1109
edfaa7c3 1110 return sprintf(buf, "%d\n", disk->minors);
1da177e4
LT
1111}
1112
1f014290
TH
1113static ssize_t disk_ext_range_show(struct device *dev,
1114 struct device_attribute *attr, char *buf)
1115{
1116 struct gendisk *disk = dev_to_disk(dev);
1117
b5d0b9df 1118 return sprintf(buf, "%d\n", disk_max_parts(disk));
1f014290
TH
1119}
1120
edfaa7c3
KS
1121static ssize_t disk_removable_show(struct device *dev,
1122 struct device_attribute *attr, char *buf)
a7fd6706 1123{
edfaa7c3 1124 struct gendisk *disk = dev_to_disk(dev);
a7fd6706 1125
edfaa7c3
KS
1126 return sprintf(buf, "%d\n",
1127 (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
a7fd6706
KS
1128}
1129
8ddcd653
CH
1130static ssize_t disk_hidden_show(struct device *dev,
1131 struct device_attribute *attr, char *buf)
1132{
1133 struct gendisk *disk = dev_to_disk(dev);
1134
1135 return sprintf(buf, "%d\n",
1136 (disk->flags & GENHD_FL_HIDDEN ? 1 : 0));
1137}
1138
1c9ce527
KS
1139static ssize_t disk_ro_show(struct device *dev,
1140 struct device_attribute *attr, char *buf)
1141{
1142 struct gendisk *disk = dev_to_disk(dev);
1143
b7db9956 1144 return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
1c9ce527
KS
1145}
1146
3ad5cee5
CH
1147ssize_t part_size_show(struct device *dev,
1148 struct device_attribute *attr, char *buf)
1149{
0d02129e 1150 return sprintf(buf, "%llu\n", bdev_nr_sectors(dev_to_bdev(dev)));
3ad5cee5
CH
1151}
1152
1153ssize_t part_stat_show(struct device *dev,
1154 struct device_attribute *attr, char *buf)
1155{
0d02129e
CH
1156 struct block_device *bdev = dev_to_bdev(dev);
1157 struct request_queue *q = bdev->bd_disk->queue;
ea18e0f0 1158 struct disk_stats stat;
3ad5cee5
CH
1159 unsigned int inflight;
1160
0d02129e 1161 part_stat_read_all(bdev, &stat);
b2f609e1 1162 if (queue_is_mq(q))
0d02129e 1163 inflight = blk_mq_in_flight(q, bdev);
b2f609e1 1164 else
0d02129e 1165 inflight = part_in_flight(bdev);
ea18e0f0 1166
3ad5cee5
CH
1167 return sprintf(buf,
1168 "%8lu %8lu %8llu %8u "
1169 "%8lu %8lu %8llu %8u "
1170 "%8u %8u %8u "
1171 "%8lu %8lu %8llu %8u "
1172 "%8lu %8u"
1173 "\n",
ea18e0f0
KK
1174 stat.ios[STAT_READ],
1175 stat.merges[STAT_READ],
1176 (unsigned long long)stat.sectors[STAT_READ],
1177 (unsigned int)div_u64(stat.nsecs[STAT_READ], NSEC_PER_MSEC),
1178 stat.ios[STAT_WRITE],
1179 stat.merges[STAT_WRITE],
1180 (unsigned long long)stat.sectors[STAT_WRITE],
1181 (unsigned int)div_u64(stat.nsecs[STAT_WRITE], NSEC_PER_MSEC),
3ad5cee5 1182 inflight,
ea18e0f0 1183 jiffies_to_msecs(stat.io_ticks),
8cd5b8fc
KK
1184 (unsigned int)div_u64(stat.nsecs[STAT_READ] +
1185 stat.nsecs[STAT_WRITE] +
1186 stat.nsecs[STAT_DISCARD] +
1187 stat.nsecs[STAT_FLUSH],
1188 NSEC_PER_MSEC),
ea18e0f0
KK
1189 stat.ios[STAT_DISCARD],
1190 stat.merges[STAT_DISCARD],
1191 (unsigned long long)stat.sectors[STAT_DISCARD],
1192 (unsigned int)div_u64(stat.nsecs[STAT_DISCARD], NSEC_PER_MSEC),
1193 stat.ios[STAT_FLUSH],
1194 (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC));
3ad5cee5
CH
1195}
1196
1197ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
1198 char *buf)
1199{
0d02129e
CH
1200 struct block_device *bdev = dev_to_bdev(dev);
1201 struct request_queue *q = bdev->bd_disk->queue;
3ad5cee5
CH
1202 unsigned int inflight[2];
1203
b2f609e1 1204 if (queue_is_mq(q))
0d02129e 1205 blk_mq_in_flight_rw(q, bdev, inflight);
b2f609e1 1206 else
0d02129e 1207 part_in_flight_rw(bdev, inflight);
b2f609e1 1208
3ad5cee5
CH
1209 return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
1210}
1211
edfaa7c3
KS
1212static ssize_t disk_capability_show(struct device *dev,
1213 struct device_attribute *attr, char *buf)
86ce18d7 1214{
edfaa7c3
KS
1215 struct gendisk *disk = dev_to_disk(dev);
1216
1217 return sprintf(buf, "%x\n", disk->flags);
86ce18d7 1218}
edfaa7c3 1219
c72758f3
MP
1220static ssize_t disk_alignment_offset_show(struct device *dev,
1221 struct device_attribute *attr,
1222 char *buf)
1223{
1224 struct gendisk *disk = dev_to_disk(dev);
1225
1226 return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
1227}
1228
86b37281
MP
1229static ssize_t disk_discard_alignment_show(struct device *dev,
1230 struct device_attribute *attr,
1231 char *buf)
1232{
1233 struct gendisk *disk = dev_to_disk(dev);
1234
dd3d145d 1235 return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
86b37281
MP
1236}
1237
5657a819
JP
1238static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
1239static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL);
1240static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL);
1241static DEVICE_ATTR(hidden, 0444, disk_hidden_show, NULL);
1242static DEVICE_ATTR(ro, 0444, disk_ro_show, NULL);
1243static DEVICE_ATTR(size, 0444, part_size_show, NULL);
1244static DEVICE_ATTR(alignment_offset, 0444, disk_alignment_offset_show, NULL);
1245static DEVICE_ATTR(discard_alignment, 0444, disk_discard_alignment_show, NULL);
1246static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL);
1247static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
1248static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
1249static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);
3ad5cee5 1250
c17bb495 1251#ifdef CONFIG_FAIL_MAKE_REQUEST
3ad5cee5
CH
1252ssize_t part_fail_show(struct device *dev,
1253 struct device_attribute *attr, char *buf)
1254{
0d02129e 1255 return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_make_it_fail);
3ad5cee5
CH
1256}
1257
1258ssize_t part_fail_store(struct device *dev,
1259 struct device_attribute *attr,
1260 const char *buf, size_t count)
1261{
3ad5cee5
CH
1262 int i;
1263
1264 if (count > 0 && sscanf(buf, "%d", &i) > 0)
0d02129e 1265 dev_to_bdev(dev)->bd_make_it_fail = i;
3ad5cee5
CH
1266
1267 return count;
1268}
1269
edfaa7c3 1270static struct device_attribute dev_attr_fail =
5657a819 1271 __ATTR(make-it-fail, 0644, part_fail_show, part_fail_store);
3ad5cee5
CH
1272#endif /* CONFIG_FAIL_MAKE_REQUEST */
1273
581d4e28
JA
1274#ifdef CONFIG_FAIL_IO_TIMEOUT
1275static struct device_attribute dev_attr_fail_timeout =
5657a819 1276 __ATTR(io-timeout-fail, 0644, part_timeout_show, part_timeout_store);
581d4e28 1277#endif
edfaa7c3
KS
1278
1279static struct attribute *disk_attrs[] = {
1280 &dev_attr_range.attr,
1f014290 1281 &dev_attr_ext_range.attr,
edfaa7c3 1282 &dev_attr_removable.attr,
8ddcd653 1283 &dev_attr_hidden.attr,
1c9ce527 1284 &dev_attr_ro.attr,
edfaa7c3 1285 &dev_attr_size.attr,
c72758f3 1286 &dev_attr_alignment_offset.attr,
86b37281 1287 &dev_attr_discard_alignment.attr,
edfaa7c3
KS
1288 &dev_attr_capability.attr,
1289 &dev_attr_stat.attr,
316d315b 1290 &dev_attr_inflight.attr,
99e6608c 1291 &dev_attr_badblocks.attr,
edfaa7c3
KS
1292#ifdef CONFIG_FAIL_MAKE_REQUEST
1293 &dev_attr_fail.attr,
581d4e28
JA
1294#endif
1295#ifdef CONFIG_FAIL_IO_TIMEOUT
1296 &dev_attr_fail_timeout.attr,
edfaa7c3
KS
1297#endif
1298 NULL
1299};
1300
9438b3e0
DW
1301static umode_t disk_visible(struct kobject *kobj, struct attribute *a, int n)
1302{
1303 struct device *dev = container_of(kobj, typeof(*dev), kobj);
1304 struct gendisk *disk = dev_to_disk(dev);
1305
1306 if (a == &dev_attr_badblocks.attr && !disk->bb)
1307 return 0;
1308 return a->mode;
1309}
1310
edfaa7c3
KS
1311static struct attribute_group disk_attr_group = {
1312 .attrs = disk_attrs,
9438b3e0 1313 .is_visible = disk_visible,
edfaa7c3
KS
1314};
1315
a4dbd674 1316static const struct attribute_group *disk_attr_groups[] = {
edfaa7c3
KS
1317 &disk_attr_group,
1318 NULL
1da177e4
LT
1319};
1320
540eed56
TH
1321/**
1322 * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way
1323 * @disk: disk to replace part_tbl for
1324 * @new_ptbl: new part_tbl to install
1325 *
1326 * Replace disk->part_tbl with @new_ptbl in RCU-safe way. The
1327 * original ptbl is freed using RCU callback.
1328 *
1329 * LOCKING:
6d2cf6f2 1330 * Matching bd_mutex locked or the caller is the only user of @disk.
540eed56
TH
1331 */
1332static void disk_replace_part_tbl(struct gendisk *disk,
1333 struct disk_part_tbl *new_ptbl)
1334{
6d2cf6f2
BVA
1335 struct disk_part_tbl *old_ptbl =
1336 rcu_dereference_protected(disk->part_tbl, 1);
540eed56
TH
1337
1338 rcu_assign_pointer(disk->part_tbl, new_ptbl);
a6f23657
JA
1339
1340 if (old_ptbl) {
1341 rcu_assign_pointer(old_ptbl->last_lookup, NULL);
57bdfbf9 1342 kfree_rcu(old_ptbl, rcu_head);
a6f23657 1343 }
540eed56
TH
1344}
1345
1346/**
1347 * disk_expand_part_tbl - expand disk->part_tbl
1348 * @disk: disk to expand part_tbl for
1349 * @partno: expand such that this partno can fit in
1350 *
1351 * Expand disk->part_tbl such that @partno can fit in. disk->part_tbl
1352 * uses RCU to allow unlocked dereferencing for stats and other stuff.
1353 *
1354 * LOCKING:
6d2cf6f2
BVA
1355 * Matching bd_mutex locked or the caller is the only user of @disk.
1356 * Might sleep.
540eed56
TH
1357 *
1358 * RETURNS:
1359 * 0 on success, -errno on failure.
1360 */
1361int disk_expand_part_tbl(struct gendisk *disk, int partno)
1362{
6d2cf6f2
BVA
1363 struct disk_part_tbl *old_ptbl =
1364 rcu_dereference_protected(disk->part_tbl, 1);
540eed56
TH
1365 struct disk_part_tbl *new_ptbl;
1366 int len = old_ptbl ? old_ptbl->len : 0;
5fabcb4c 1367 int i, target;
5fabcb4c
JA
1368
1369 /*
1370 * check for int overflow, since we can get here from blkpg_ioctl()
1371 * with a user passed 'partno'.
1372 */
1373 target = partno + 1;
1374 if (target < 0)
1375 return -EINVAL;
540eed56
TH
1376
1377 /* disk_max_parts() is zero during initialization, ignore if so */
1378 if (disk_max_parts(disk) && target > disk_max_parts(disk))
1379 return -EINVAL;
1380
1381 if (target <= len)
1382 return 0;
1383
78b90a2c
GS
1384 new_ptbl = kzalloc_node(struct_size(new_ptbl, part, target), GFP_KERNEL,
1385 disk->node_id);
540eed56
TH
1386 if (!new_ptbl)
1387 return -ENOMEM;
1388
540eed56
TH
1389 new_ptbl->len = target;
1390
1391 for (i = 0; i < len; i++)
1392 rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
1393
1394 disk_replace_part_tbl(disk, new_ptbl);
1395 return 0;
1396}
1397
b5bd357c
LC
1398/**
1399 * disk_release - releases all allocated resources of the gendisk
1400 * @dev: the device representing this disk
1401 *
1402 * This function releases all allocated resources of the gendisk.
1403 *
b5bd357c
LC
1404 * Drivers which used __device_add_disk() have a gendisk with a request_queue
1405 * assigned. Since the request_queue sits on top of the gendisk for these
1406 * drivers we also call blk_put_queue() for them, and we expect the
1407 * request_queue refcount to reach 0 at this point, and so the request_queue
1408 * will also be freed prior to the disk.
e8c7d14a
LC
1409 *
1410 * Context: can sleep
b5bd357c 1411 */
edfaa7c3 1412static void disk_release(struct device *dev)
1da177e4 1413{
edfaa7c3
KS
1414 struct gendisk *disk = dev_to_disk(dev);
1415
e8c7d14a
LC
1416 might_sleep();
1417
2da78092 1418 blk_free_devt(dev->devt);
77ea887e 1419 disk_release_events(disk);
1da177e4 1420 kfree(disk->random);
540eed56 1421 disk_replace_part_tbl(disk, NULL);
cb8432d6 1422 bdput(disk->part0);
523e1d39
TH
1423 if (disk->queue)
1424 blk_put_queue(disk->queue);
1da177e4
LT
1425 kfree(disk);
1426}
edfaa7c3
KS
1427struct class block_class = {
1428 .name = "block",
1da177e4
LT
1429};
1430
3c2670e6 1431static char *block_devnode(struct device *dev, umode_t *mode,
4e4098a3 1432 kuid_t *uid, kgid_t *gid)
b03f38b6
KS
1433{
1434 struct gendisk *disk = dev_to_disk(dev);
1435
348e114b
CH
1436 if (disk->fops->devnode)
1437 return disk->fops->devnode(disk, mode);
b03f38b6
KS
1438 return NULL;
1439}
1440
ef45fe47 1441const struct device_type disk_type = {
edfaa7c3
KS
1442 .name = "disk",
1443 .groups = disk_attr_groups,
1444 .release = disk_release,
e454cea2 1445 .devnode = block_devnode,
1da177e4
LT
1446};
1447
a6e2ba88 1448#ifdef CONFIG_PROC_FS
cf771cb5
TH
1449/*
1450 * aggregate disk stat collector. Uses the same stats that the sysfs
1451 * entries do, above, but makes them available through one seq_file.
1452 *
1453 * The output looks suspiciously like /proc/partitions with a bunch of
1454 * extra fields.
1455 */
1456static int diskstats_show(struct seq_file *seqf, void *v)
1da177e4
LT
1457{
1458 struct gendisk *gp = v;
e71bf0d0 1459 struct disk_part_iter piter;
ad1eaa53 1460 struct block_device *hd;
1da177e4 1461 char buf[BDEVNAME_SIZE];
e016b782 1462 unsigned int inflight;
ea18e0f0 1463 struct disk_stats stat;
1da177e4
LT
1464
1465 /*
ed9e1982 1466 if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
cf771cb5 1467 seq_puts(seqf, "major minor name"
1da177e4
LT
1468 " rio rmerge rsect ruse wio wmerge "
1469 "wsect wuse running use aveq"
1470 "\n\n");
1471 */
9f5e4865 1472
71982a40 1473 disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
e71bf0d0 1474 while ((hd = disk_part_iter_next(&piter))) {
0d02129e 1475 part_stat_read_all(hd, &stat);
b2f609e1 1476 if (queue_is_mq(gp->queue))
ad1eaa53 1477 inflight = blk_mq_in_flight(gp->queue, hd);
b2f609e1 1478 else
ad1eaa53 1479 inflight = part_in_flight(hd);
ea18e0f0 1480
bdca3c87
MC
1481 seq_printf(seqf, "%4d %7d %s "
1482 "%lu %lu %lu %u "
1483 "%lu %lu %lu %u "
1484 "%u %u %u "
b6866318
KK
1485 "%lu %lu %lu %u "
1486 "%lu %u"
1487 "\n",
ad1eaa53
CH
1488 MAJOR(hd->bd_dev), MINOR(hd->bd_dev),
1489 disk_name(gp, hd->bd_partno, buf),
ea18e0f0
KK
1490 stat.ios[STAT_READ],
1491 stat.merges[STAT_READ],
1492 stat.sectors[STAT_READ],
1493 (unsigned int)div_u64(stat.nsecs[STAT_READ],
1494 NSEC_PER_MSEC),
1495 stat.ios[STAT_WRITE],
1496 stat.merges[STAT_WRITE],
1497 stat.sectors[STAT_WRITE],
1498 (unsigned int)div_u64(stat.nsecs[STAT_WRITE],
1499 NSEC_PER_MSEC),
e016b782 1500 inflight,
ea18e0f0 1501 jiffies_to_msecs(stat.io_ticks),
8cd5b8fc
KK
1502 (unsigned int)div_u64(stat.nsecs[STAT_READ] +
1503 stat.nsecs[STAT_WRITE] +
1504 stat.nsecs[STAT_DISCARD] +
1505 stat.nsecs[STAT_FLUSH],
1506 NSEC_PER_MSEC),
ea18e0f0
KK
1507 stat.ios[STAT_DISCARD],
1508 stat.merges[STAT_DISCARD],
1509 stat.sectors[STAT_DISCARD],
1510 (unsigned int)div_u64(stat.nsecs[STAT_DISCARD],
1511 NSEC_PER_MSEC),
1512 stat.ios[STAT_FLUSH],
1513 (unsigned int)div_u64(stat.nsecs[STAT_FLUSH],
1514 NSEC_PER_MSEC)
28f39d55 1515 );
1da177e4 1516 }
e71bf0d0 1517 disk_part_iter_exit(&piter);
9f5e4865 1518
1da177e4
LT
1519 return 0;
1520}
1521
31d85ab2 1522static const struct seq_operations diskstats_op = {
def4e38d
TH
1523 .start = disk_seqf_start,
1524 .next = disk_seqf_next,
1525 .stop = disk_seqf_stop,
1da177e4
LT
1526 .show = diskstats_show
1527};
f500975a
AD
1528
1529static int __init proc_genhd_init(void)
1530{
fddda2b7
CH
1531 proc_create_seq("diskstats", 0, NULL, &diskstats_op);
1532 proc_create_seq("partitions", 0, NULL, &partitions_op);
f500975a
AD
1533 return 0;
1534}
1535module_init(proc_genhd_init);
a6e2ba88 1536#endif /* CONFIG_PROC_FS */
1da177e4 1537
cf771cb5 1538dev_t blk_lookup_devt(const char *name, int partno)
a142be85 1539{
def4e38d
TH
1540 dev_t devt = MKDEV(0, 0);
1541 struct class_dev_iter iter;
1542 struct device *dev;
a142be85 1543
def4e38d
TH
1544 class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
1545 while ((dev = class_dev_iter_next(&iter))) {
a142be85 1546 struct gendisk *disk = dev_to_disk(dev);
0d02129e 1547 struct block_device *part;
a142be85 1548
3ada8b7e 1549 if (strcmp(dev_name(dev), name))
f331c029 1550 continue;
f331c029 1551
41b8c853
NB
1552 if (partno < disk->minors) {
1553 /* We need to return the right devno, even
1554 * if the partition doesn't exist yet.
1555 */
1556 devt = MKDEV(MAJOR(dev->devt),
1557 MINOR(dev->devt) + partno);
1558 break;
1559 }
0d02129e 1560 part = bdget_disk(disk, partno);
2bbedcb4 1561 if (part) {
0d02129e
CH
1562 devt = part->bd_dev;
1563 bdput(part);
548b10eb 1564 break;
def4e38d 1565 }
5c0ef6d0 1566 }
def4e38d 1567 class_dev_iter_exit(&iter);
edfaa7c3
KS
1568 return devt;
1569}
edfaa7c3 1570
e319e1fb 1571struct gendisk *__alloc_disk_node(int minors, int node_id)
1946089a
CL
1572{
1573 struct gendisk *disk;
6d2cf6f2 1574 struct disk_part_tbl *ptbl;
1946089a 1575
de65b012
CH
1576 if (minors > DISK_MAX_PARTS) {
1577 printk(KERN_ERR
7fb52621 1578 "block: can't allocate more than %d partitions\n",
de65b012
CH
1579 DISK_MAX_PARTS);
1580 minors = DISK_MAX_PARTS;
1581 }
1946089a 1582
c1b511eb 1583 disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
f93af2a4
CH
1584 if (!disk)
1585 return NULL;
6c23a968 1586
cb8432d6
CH
1587 disk->part0 = bdev_alloc(disk, 0);
1588 if (!disk->part0)
22ae8ce8
CH
1589 goto out_free_disk;
1590
f93af2a4 1591 disk->node_id = node_id;
22ae8ce8 1592 if (disk_expand_part_tbl(disk, 0))
15e3d2c5 1593 goto out_bdput;
f93af2a4
CH
1594
1595 ptbl = rcu_dereference_protected(disk->part_tbl, 1);
8446fe92 1596 rcu_assign_pointer(ptbl->part[0], disk->part0);
f93af2a4
CH
1597
1598 disk->minors = minors;
1599 rand_initialize_disk(disk);
1600 disk_to_dev(disk)->class = &block_class;
1601 disk_to_dev(disk)->type = &disk_type;
1602 device_initialize(disk_to_dev(disk));
1da177e4 1603 return disk;
f93af2a4 1604
22ae8ce8 1605out_bdput:
cb8432d6 1606 bdput(disk->part0);
f93af2a4
CH
1607out_free_disk:
1608 kfree(disk);
1609 return NULL;
1da177e4 1610}
e319e1fb 1611EXPORT_SYMBOL(__alloc_disk_node);
1da177e4 1612
b5bd357c
LC
1613/**
1614 * put_disk - decrements the gendisk refcount
0d20dcc2 1615 * @disk: the struct gendisk to decrement the refcount for
b5bd357c
LC
1616 *
1617 * This decrements the refcount for the struct gendisk. When this reaches 0
1618 * we'll have disk_release() called.
e8c7d14a
LC
1619 *
1620 * Context: Any context, but the last reference must not be dropped from
1621 * atomic context.
b5bd357c 1622 */
1da177e4
LT
1623void put_disk(struct gendisk *disk)
1624{
1625 if (disk)
efdc41c8 1626 put_device(disk_to_dev(disk));
1da177e4 1627}
1da177e4
LT
1628EXPORT_SYMBOL(put_disk);
1629
e3264a4d
HR
1630static void set_disk_ro_uevent(struct gendisk *gd, int ro)
1631{
1632 char event[] = "DISK_RO=1";
1633 char *envp[] = { event, NULL };
1634
1635 if (!ro)
1636 event[8] = '0';
1637 kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
1638}
1639
1da177e4
LT
1640void set_disk_ro(struct gendisk *disk, int flag)
1641{
e71bf0d0 1642 struct disk_part_iter piter;
ad1eaa53 1643 struct block_device *part;
e71bf0d0 1644
cb8432d6 1645 if (disk->part0->bd_read_only != flag) {
e3264a4d 1646 set_disk_ro_uevent(disk, flag);
cb8432d6 1647 disk->part0->bd_read_only = flag;
e3264a4d
HR
1648 }
1649
1650 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
e71bf0d0 1651 while ((part = disk_part_iter_next(&piter)))
ad1eaa53 1652 part->bd_read_only = flag;
e71bf0d0 1653 disk_part_iter_exit(&piter);
1da177e4
LT
1654}
1655
1656EXPORT_SYMBOL(set_disk_ro);
1657
1658int bdev_read_only(struct block_device *bdev)
1659{
1660 if (!bdev)
1661 return 0;
83950d35 1662 return bdev->bd_read_only;
1da177e4
LT
1663}
1664
1665EXPORT_SYMBOL(bdev_read_only);
1666
77ea887e
TH
1667/*
1668 * Disk events - monitor disk events like media change and eject request.
1669 */
1670struct disk_events {
1671 struct list_head node; /* all disk_event's */
1672 struct gendisk *disk; /* the associated disk */
1673 spinlock_t lock;
1674
fdd514e1 1675 struct mutex block_mutex; /* protects blocking */
77ea887e
TH
1676 int block; /* event blocking depth */
1677 unsigned int pending; /* events already sent out */
1678 unsigned int clearing; /* events being cleared */
1679
1680 long poll_msecs; /* interval, -1 for default */
1681 struct delayed_work dwork;
1682};
1683
1684static const char *disk_events_strs[] = {
1685 [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "media_change",
1686 [ilog2(DISK_EVENT_EJECT_REQUEST)] = "eject_request",
1687};
1688
1689static char *disk_uevents[] = {
1690 [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "DISK_MEDIA_CHANGE=1",
1691 [ilog2(DISK_EVENT_EJECT_REQUEST)] = "DISK_EJECT_REQUEST=1",
1692};
1693
1694/* list of all disk_events */
1695static DEFINE_MUTEX(disk_events_mutex);
1696static LIST_HEAD(disk_events);
1697
1698/* disable in-kernel polling by default */
1fe8f348 1699static unsigned long disk_events_dfl_poll_msecs;
77ea887e
TH
1700
1701static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
1702{
1703 struct disk_events *ev = disk->ev;
1704 long intv_msecs = 0;
1705
1706 /*
1707 * If device-specific poll interval is set, always use it. If
673387a9 1708 * the default is being used, poll if the POLL flag is set.
77ea887e
TH
1709 */
1710 if (ev->poll_msecs >= 0)
1711 intv_msecs = ev->poll_msecs;
c92e2f04 1712 else if (disk->event_flags & DISK_EVENT_FLAG_POLL)
77ea887e
TH
1713 intv_msecs = disk_events_dfl_poll_msecs;
1714
1715 return msecs_to_jiffies(intv_msecs);
1716}
1717
c3af54af
TH
1718/**
1719 * disk_block_events - block and flush disk event checking
1720 * @disk: disk to block events for
1721 *
1722 * On return from this function, it is guaranteed that event checking
1723 * isn't in progress and won't happen until unblocked by
1724 * disk_unblock_events(). Events blocking is counted and the actual
1725 * unblocking happens after the matching number of unblocks are done.
1726 *
1727 * Note that this intentionally does not block event checking from
1728 * disk_clear_events().
1729 *
1730 * CONTEXT:
1731 * Might sleep.
1732 */
1733void disk_block_events(struct gendisk *disk)
77ea887e
TH
1734{
1735 struct disk_events *ev = disk->ev;
1736 unsigned long flags;
1737 bool cancel;
1738
c3af54af
TH
1739 if (!ev)
1740 return;
1741
fdd514e1
TH
1742 /*
1743 * Outer mutex ensures that the first blocker completes canceling
1744 * the event work before further blockers are allowed to finish.
1745 */
1746 mutex_lock(&ev->block_mutex);
1747
77ea887e
TH
1748 spin_lock_irqsave(&ev->lock, flags);
1749 cancel = !ev->block++;
1750 spin_unlock_irqrestore(&ev->lock, flags);
1751
c3af54af
TH
1752 if (cancel)
1753 cancel_delayed_work_sync(&disk->ev->dwork);
fdd514e1
TH
1754
1755 mutex_unlock(&ev->block_mutex);
77ea887e
TH
1756}
1757
1758static void __disk_unblock_events(struct gendisk *disk, bool check_now)
1759{
1760 struct disk_events *ev = disk->ev;
1761 unsigned long intv;
1762 unsigned long flags;
1763
1764 spin_lock_irqsave(&ev->lock, flags);
1765
1766 if (WARN_ON_ONCE(ev->block <= 0))
1767 goto out_unlock;
1768
1769 if (--ev->block)
1770 goto out_unlock;
1771
77ea887e 1772 intv = disk_events_poll_jiffies(disk);
77ea887e 1773 if (check_now)
695588f9
VK
1774 queue_delayed_work(system_freezable_power_efficient_wq,
1775 &ev->dwork, 0);
77ea887e 1776 else if (intv)
695588f9
VK
1777 queue_delayed_work(system_freezable_power_efficient_wq,
1778 &ev->dwork, intv);
77ea887e
TH
1779out_unlock:
1780 spin_unlock_irqrestore(&ev->lock, flags);
1781}
1782
77ea887e
TH
1783/**
1784 * disk_unblock_events - unblock disk event checking
1785 * @disk: disk to unblock events for
1786 *
1787 * Undo disk_block_events(). When the block count reaches zero, it
1788 * starts events polling if configured.
1789 *
1790 * CONTEXT:
1791 * Don't care. Safe to call from irq context.
1792 */
1793void disk_unblock_events(struct gendisk *disk)
1794{
1795 if (disk->ev)
facc31dd 1796 __disk_unblock_events(disk, false);
77ea887e
TH
1797}
1798
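/*
 * Illustrative sketch, not part of genhd.c: because the block/unblock
 * calls above nest, a helper like the hypothetical one below stays
 * correct even if its caller has already blocked events itself.
 */
static void mydrv_quiesce_and_poke_hw(struct gendisk *disk)
{
	disk_block_events(disk);	/* may sleep; flushes a running check */

	/* ... touch hypothetical hardware without racing check_events ... */

	disk_unblock_events(disk);	/* polling resumes once depth hits 0 */
}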
1799/**
85ef06d1
TH
1800 * disk_flush_events - schedule immediate event checking and flushing
1801 * @disk: disk to check and flush events for
1802 * @mask: events to flush
77ea887e 1803 *
85ef06d1
TH
1804 * Schedule immediate event checking on @disk if not blocked. Events in
1805 * @mask are scheduled to be cleared from the driver. Note that this
1806 * doesn't clear the events from @disk->ev.
77ea887e
TH
1807 *
1808 * CONTEXT:
85ef06d1 1809 * If @mask is non-zero must be called with bdev->bd_mutex held.
77ea887e 1810 */
85ef06d1 1811void disk_flush_events(struct gendisk *disk, unsigned int mask)
77ea887e 1812{
a9dce2a3 1813 struct disk_events *ev = disk->ev;
a9dce2a3
TH
1814
1815 if (!ev)
1816 return;
1817
85ef06d1
TH
1818 spin_lock_irq(&ev->lock);
1819 ev->clearing |= mask;
41f63c53 1820 if (!ev->block)
695588f9
VK
1821 mod_delayed_work(system_freezable_power_efficient_wq,
1822 &ev->dwork, 0);
85ef06d1 1823 spin_unlock_irq(&ev->lock);
77ea887e 1824}
77ea887e
TH
1825
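/*
 * Illustrative sketch, not part of genhd.c: with a zero @mask,
 * disk_flush_events() merely schedules an immediate check and the
 * bd_mutex requirement documented above does not apply.  This is the
 * same pattern the module-parameter handler further below uses to
 * re-evaluate the polling interval for every registered disk.
 */
static void mydrv_kick_event_check(struct gendisk *disk)
{
	disk_flush_events(disk, 0);
}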
1826/**
1827 * disk_clear_events - synchronously check, clear and return pending events
1828 * @disk: disk to fetch and clear events from
da3dae54 1829 * @mask: mask of events to be fetched and cleared
77ea887e
TH
1830 *
1831 * Disk events are synchronously checked and pending events in @mask
1832 * are cleared and returned. This ignores the block count.
1833 *
1834 * CONTEXT:
1835 * Might sleep.
1836 */
95f6f3a4 1837static unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
77ea887e 1838{
77ea887e
TH
1839 struct disk_events *ev = disk->ev;
1840 unsigned int pending;
12c2bdb2 1841 unsigned int clearing = mask;
77ea887e 1842
a564e23f 1843 if (!ev)
77ea887e 1844 return 0;
77ea887e 1845
12c2bdb2
DB
1846 disk_block_events(disk);
1847
1848 /*
1849 * store the union of mask and ev->clearing on the stack so that the
1850 * race with disk_flush_events does not cause ambiguity (ev->clearing
1851 * can still be modified even if events are blocked).
1852 */
77ea887e 1853 spin_lock_irq(&ev->lock);
12c2bdb2
DB
1854 clearing |= ev->clearing;
1855 ev->clearing = 0;
77ea887e
TH
1856 spin_unlock_irq(&ev->lock);
1857
12c2bdb2 1858 disk_check_events(ev, &clearing);
aea24a8b 1859 /*
12c2bdb2
DB
 1860	 * If ev->clearing is non-zero, disk_flush_events() was called in the
 1861	 * middle of this function, so run the work function again without delay.
aea24a8b 1862 */
12c2bdb2 1863 __disk_unblock_events(disk, ev->clearing ? true : false);
77ea887e
TH
1864
1865 /* then, fetch and clear pending events */
1866 spin_lock_irq(&ev->lock);
77ea887e
TH
1867 pending = ev->pending & mask;
1868 ev->pending &= ~mask;
1869 spin_unlock_irq(&ev->lock);
12c2bdb2 1870 WARN_ON_ONCE(clearing & mask);
77ea887e
TH
1871
1872 return pending;
1873}
1874
95f6f3a4
CH
1875/**
1876 * bdev_check_media_change - check if a removable media has been changed
1877 * @bdev: block device to check
1878 *
 1879 * Check whether removable media has been changed.  If so, attempt to free
 1880 * all dentries and inodes and invalidate all block device page cache
 1881 * entries for that device.
1882 *
1883 * Returns %true if the block device changed, or %false if not.
1884 */
1885bool bdev_check_media_change(struct block_device *bdev)
1886{
1887 unsigned int events;
1888
1889 events = disk_clear_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE |
1890 DISK_EVENT_EJECT_REQUEST);
1891 if (!(events & DISK_EVENT_MEDIA_CHANGE))
1892 return false;
1893
1894 if (__invalidate_device(bdev, true))
1895 pr_warn("VFS: busy inodes on changed media %s\n",
1896 bdev->bd_disk->disk_name);
38430f08 1897 set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
95f6f3a4
CH
1898 return true;
1899}
1900EXPORT_SYMBOL(bdev_check_media_change);
1901
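/*
 * Illustrative sketch, not part of genhd.c: how a removable-media
 * driver's ->open() might consume the events machinery.  The "mydrv_*"
 * names are hypothetical.
 */
static int mydrv_open(struct block_device *bdev, fmode_t mode)
{
	if (bdev_check_media_change(bdev)) {
		/*
		 * The media was swapped and the page cache has already been
		 * invalidated; reread hypothetical driver state such as
		 * capacity, geometry and write protect.
		 */
		mydrv_revalidate(bdev->bd_disk);
	}

	return 0;
}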
12c2bdb2
DB
1902/*
 1904 * This part is split out so that disk_clear_events() can pass in a
 1905 * different pointer for clearing_ptr.
1905 */
77ea887e
TH
1906static void disk_events_workfn(struct work_struct *work)
1907{
1908 struct delayed_work *dwork = to_delayed_work(work);
1909 struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
12c2bdb2
DB
1910
1911 disk_check_events(ev, &ev->clearing);
1912}
1913
1914static void disk_check_events(struct disk_events *ev,
1915 unsigned int *clearing_ptr)
1916{
77ea887e
TH
1917 struct gendisk *disk = ev->disk;
1918 char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
12c2bdb2 1919 unsigned int clearing = *clearing_ptr;
77ea887e
TH
1920 unsigned int events;
1921 unsigned long intv;
1922 int nr_events = 0, i;
1923
1924 /* check events */
1925 events = disk->fops->check_events(disk, clearing);
1926
1927 /* accumulate pending events and schedule next poll if necessary */
1928 spin_lock_irq(&ev->lock);
1929
1930 events &= ~ev->pending;
1931 ev->pending |= events;
12c2bdb2 1932 *clearing_ptr &= ~clearing;
77ea887e
TH
1933
1934 intv = disk_events_poll_jiffies(disk);
1935 if (!ev->block && intv)
695588f9
VK
1936 queue_delayed_work(system_freezable_power_efficient_wq,
1937 &ev->dwork, intv);
77ea887e
TH
1938
1939 spin_unlock_irq(&ev->lock);
1940
7c88a168
TH
1941 /*
1942 * Tell userland about new events. Only the events listed in
c92e2f04
MW
1943 * @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
1944 * is set. Otherwise, events are processed internally but never
1945 * get reported to userland.
7c88a168 1946 */
77ea887e 1947 for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
c92e2f04
MW
1948 if ((events & disk->events & (1 << i)) &&
1949 (disk->event_flags & DISK_EVENT_FLAG_UEVENT))
77ea887e
TH
1950 envp[nr_events++] = disk_uevents[i];
1951
1952 if (nr_events)
1953 kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
1954}
1955
1956/*
1957 * A disk events enabled device has the following sysfs nodes under
1958 * its /sys/block/X/ directory.
1959 *
1960 * events : list of all supported events
1961 * events_async : list of events which can be detected w/o polling
673387a9 1962 * (always empty, only for backwards compatibility)
77ea887e
TH
1963 * events_poll_msecs : polling interval, 0: disable, -1: system default
1964 */
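/*
 * Illustrative userspace sketch, not part of genhd.c: reading the sysfs
 * nodes described above.  The disk name "sr0" is only an example; the
 * empty events_async file simply produces no output here.
 */
#include <stdio.h>

int main(void)
{
	static const char *nodes[] = {
		"/sys/block/sr0/events",
		"/sys/block/sr0/events_async",
		"/sys/block/sr0/events_poll_msecs",
	};
	char line[128];
	unsigned int i;

	for (i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
		FILE *f = fopen(nodes[i], "r");

		if (!f)
			continue;
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", nodes[i], line);
		fclose(f);
	}
	return 0;
}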
1965static ssize_t __disk_events_show(unsigned int events, char *buf)
1966{
1967 const char *delim = "";
1968 ssize_t pos = 0;
1969 int i;
1970
1971 for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
1972 if (events & (1 << i)) {
1973 pos += sprintf(buf + pos, "%s%s",
1974 delim, disk_events_strs[i]);
1975 delim = " ";
1976 }
1977 if (pos)
1978 pos += sprintf(buf + pos, "\n");
1979 return pos;
1980}
1981
1982static ssize_t disk_events_show(struct device *dev,
1983 struct device_attribute *attr, char *buf)
1984{
1985 struct gendisk *disk = dev_to_disk(dev);
1986
c92e2f04
MW
1987 if (!(disk->event_flags & DISK_EVENT_FLAG_UEVENT))
1988 return 0;
1989
77ea887e
TH
1990 return __disk_events_show(disk->events, buf);
1991}
1992
1993static ssize_t disk_events_async_show(struct device *dev,
1994 struct device_attribute *attr, char *buf)
1995{
673387a9 1996 return 0;
77ea887e
TH
1997}
1998
1999static ssize_t disk_events_poll_msecs_show(struct device *dev,
2000 struct device_attribute *attr,
2001 char *buf)
2002{
2003 struct gendisk *disk = dev_to_disk(dev);
2004
cdf3e3de
MW
2005 if (!disk->ev)
2006 return sprintf(buf, "-1\n");
2007
77ea887e
TH
2008 return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
2009}
2010
2011static ssize_t disk_events_poll_msecs_store(struct device *dev,
2012 struct device_attribute *attr,
2013 const char *buf, size_t count)
2014{
2015 struct gendisk *disk = dev_to_disk(dev);
2016 long intv;
2017
2018 if (!count || !sscanf(buf, "%ld", &intv))
2019 return -EINVAL;
2020
2021 if (intv < 0 && intv != -1)
2022 return -EINVAL;
2023
cdf3e3de
MW
2024 if (!disk->ev)
2025 return -ENODEV;
2026
c3af54af 2027 disk_block_events(disk);
77ea887e
TH
2028 disk->ev->poll_msecs = intv;
2029 __disk_unblock_events(disk, true);
2030
2031 return count;
2032}
2033
5657a819
JP
2034static const DEVICE_ATTR(events, 0444, disk_events_show, NULL);
2035static const DEVICE_ATTR(events_async, 0444, disk_events_async_show, NULL);
2036static const DEVICE_ATTR(events_poll_msecs, 0644,
77ea887e
TH
2037 disk_events_poll_msecs_show,
2038 disk_events_poll_msecs_store);
2039
2040static const struct attribute *disk_events_attrs[] = {
2041 &dev_attr_events.attr,
2042 &dev_attr_events_async.attr,
2043 &dev_attr_events_poll_msecs.attr,
2044 NULL,
2045};
2046
2047/*
2048 * The default polling interval can be specified by the kernel
2049 * parameter block.events_dfl_poll_msecs which defaults to 0
 2050 * (disable).  This can also be modified at runtime by writing to
1624b0b2 2051 * /sys/module/block/parameters/events_dfl_poll_msecs.
77ea887e
TH
2052 */
2053static int disk_events_set_dfl_poll_msecs(const char *val,
2054 const struct kernel_param *kp)
2055{
2056 struct disk_events *ev;
2057 int ret;
2058
2059 ret = param_set_ulong(val, kp);
2060 if (ret < 0)
2061 return ret;
2062
2063 mutex_lock(&disk_events_mutex);
2064
2065 list_for_each_entry(ev, &disk_events, node)
85ef06d1 2066 disk_flush_events(ev->disk, 0);
77ea887e
TH
2067
2068 mutex_unlock(&disk_events_mutex);
2069
2070 return 0;
2071}
2072
2073static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
2074 .set = disk_events_set_dfl_poll_msecs,
2075 .get = param_get_ulong,
2076};
2077
2078#undef MODULE_PARAM_PREFIX
2079#define MODULE_PARAM_PREFIX "block."
2080
2081module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
2082 &disk_events_dfl_poll_msecs, 0644);
2083
2084/*
9f53d2fe 2085 * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
77ea887e 2086 */
9f53d2fe 2087static void disk_alloc_events(struct gendisk *disk)
77ea887e
TH
2088{
2089 struct disk_events *ev;
2090
cdf3e3de 2091 if (!disk->fops->check_events || !disk->events)
77ea887e
TH
2092 return;
2093
2094 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
2095 if (!ev) {
2096 pr_warn("%s: failed to initialize events\n", disk->disk_name);
2097 return;
2098 }
2099
77ea887e
TH
2100 INIT_LIST_HEAD(&ev->node);
2101 ev->disk = disk;
2102 spin_lock_init(&ev->lock);
fdd514e1 2103 mutex_init(&ev->block_mutex);
77ea887e
TH
2104 ev->block = 1;
2105 ev->poll_msecs = -1;
2106 INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
2107
9f53d2fe
SG
2108 disk->ev = ev;
2109}
2110
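/*
 * Illustrative sketch, not part of genhd.c: what a driver sets up so that
 * disk_alloc_events() above actually allocates disk->ev.  Both a
 * ->check_events() callback in the fops and a non-zero disk->events mask
 * are required; DISK_EVENT_FLAG_UEVENT additionally reports the events to
 * udev and DISK_EVENT_FLAG_POLL opts into the default polling interval.
 * The "mydrv_*" names refer to the hypothetical sketches above.
 */
static const struct block_device_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.open		= mydrv_open,
	.check_events	= mydrv_check_events,
};

static void mydrv_setup_disk(struct gendisk *disk)
{
	disk->fops = &mydrv_fops;
	disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
	disk->event_flags = DISK_EVENT_FLAG_UEVENT | DISK_EVENT_FLAG_POLL;
	/* device_add_disk() then triggers disk_alloc_events()/disk_add_events() */
}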
2111static void disk_add_events(struct gendisk *disk)
2112{
9f53d2fe
SG
2113 /* FIXME: error handling */
2114 if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
2115 pr_warn("%s: failed to create sysfs files for events\n",
2116 disk->disk_name);
2117
cdf3e3de
MW
2118 if (!disk->ev)
2119 return;
2120
77ea887e 2121 mutex_lock(&disk_events_mutex);
9f53d2fe 2122 list_add_tail(&disk->ev->node, &disk_events);
77ea887e
TH
2123 mutex_unlock(&disk_events_mutex);
2124
2125 /*
2126 * Block count is initialized to 1 and the following initial
2127 * unblock kicks it into action.
2128 */
2129 __disk_unblock_events(disk, true);
2130}
2131
2132static void disk_del_events(struct gendisk *disk)
2133{
cdf3e3de
MW
2134 if (disk->ev) {
2135 disk_block_events(disk);
77ea887e 2136
cdf3e3de
MW
2137 mutex_lock(&disk_events_mutex);
2138 list_del_init(&disk->ev->node);
2139 mutex_unlock(&disk_events_mutex);
2140 }
77ea887e
TH
2141
2142 sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
2143}
2144
2145static void disk_release_events(struct gendisk *disk)
2146{
2147 /* the block count should be 1 from disk_del_events() */
2148 WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
2149 kfree(disk->ev);
2150}