// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "dax-private.h"
#include "bus.h"

static struct class *dax_class;

static DEFINE_MUTEX(dax_bus_lock);

#define DAX_NAME_LEN 30
struct dax_id {
	struct list_head list;
	char dev_name[DAX_NAME_LEN];
};
static int dax_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	/*
	 * We only ever expect to handle device-dax instances, i.e. the
	 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
	 */
	return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0);
}

static struct dax_device_driver *to_dax_drv(struct device_driver *drv)
{
	return container_of(drv, struct dax_device_driver, drv);
}
static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv,
		const char *dev_name)
{
	struct dax_id *dax_id;

	lockdep_assert_held(&dax_bus_lock);

	list_for_each_entry(dax_id, &dax_drv->ids, list)
		if (sysfs_streq(dax_id->dev_name, dev_name))
			return dax_id;
	return NULL;
}

static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev)
{
	int match;

	mutex_lock(&dax_bus_lock);
	match = !!__dax_match_id(dax_drv, dev_name(dev));
	mutex_unlock(&dax_bus_lock);

	return match;
}

enum id_action {
	ID_REMOVE,
	ID_ADD,
};
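/*
 * Dynamic driver id support: userspace writes a "daxX.Y" device name to the
 * driver's new_id/remove_id attributes to add or remove that device from the
 * driver's match list, serialized by dax_bus_lock.
 */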
static ssize_t do_id_store(struct device_driver *drv, const char *buf,
		size_t count, enum id_action action)
{
	struct dax_device_driver *dax_drv = to_dax_drv(drv);
	unsigned int region_id, id;
	char devname[DAX_NAME_LEN];
	struct dax_id *dax_id;
	ssize_t rc = count;
	int fields;

	fields = sscanf(buf, "dax%d.%d", &region_id, &id);
	if (fields != 2)
		return -EINVAL;
	sprintf(devname, "dax%d.%d", region_id, id);
	if (!sysfs_streq(buf, devname))
		return -EINVAL;

	mutex_lock(&dax_bus_lock);
	dax_id = __dax_match_id(dax_drv, buf);
	if (!dax_id) {
		if (action == ID_ADD) {
			dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL);
			if (dax_id) {
				strncpy(dax_id->dev_name, buf, DAX_NAME_LEN);
				list_add(&dax_id->list, &dax_drv->ids);
			} else
				rc = -ENOMEM;
		}
	} else if (action == ID_REMOVE) {
		list_del(&dax_id->list);
		kfree(dax_id);
	}
	mutex_unlock(&dax_bus_lock);

	if (rc < 0)
		return rc;
	if (action == ID_ADD)
		rc = driver_attach(drv);
	if (rc)
		return rc;
	return count;
}
static ssize_t new_id_store(struct device_driver *drv, const char *buf,
		size_t count)
{
	return do_id_store(drv, buf, count, ID_ADD);
}
static DRIVER_ATTR_WO(new_id);

static ssize_t remove_id_store(struct device_driver *drv, const char *buf,
		size_t count)
{
	return do_id_store(drv, buf, count, ID_REMOVE);
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *dax_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dax_drv);
static int dax_bus_match(struct device *dev, struct device_driver *drv);

static bool is_static(struct dax_region *dax_region)
{
	return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0;
}

static u64 dev_dax_size(struct dev_dax *dev_dax)
{
	u64 size = 0;
	int i;

	device_lock_assert(&dev_dax->dev);

	for (i = 0; i < dev_dax->nr_range; i++)
		size += range_len(&dev_dax->ranges[i].range);

	return size;
}
static int dax_bus_probe(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	int rc;

	if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0)
		return -ENXIO;

	rc = dax_drv->probe(dev_dax);

	if (rc || is_static(dax_region))
		return rc;

	/*
	 * Track new seed creation only after successful probe of the
	 * previous seed.
	 */
	if (dax_region->seed == dev)
		dax_region->seed = NULL;

	return 0;
}

static int dax_bus_remove(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);

	dax_drv->remove(dev_dax);

	return 0;
}
static struct bus_type dax_bus_type = {
	.name = "dax",
	.uevent = dax_bus_uevent,
	.match = dax_bus_match,
	.probe = dax_bus_probe,
	.remove = dax_bus_remove,
	.drv_groups = dax_drv_groups,
};

static int dax_bus_match(struct device *dev, struct device_driver *drv)
{
	struct dax_device_driver *dax_drv = to_dax_drv(drv);

	/*
	 * All but the 'device-dax' driver, which has 'match_always'
	 * set, require an exact id match.
	 */
	if (dax_drv->match_always)
		return 1;

	return dax_match_id(dax_drv, dev);
}
/*
 * Rely on the fact that drvdata is set before the attributes are
 * registered, and that the attributes are unregistered before drvdata
 * is cleared to assume that drvdata is always valid.
 */
static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%llu\n", (unsigned long long)
			resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
		region_size_show, NULL);

static ssize_t region_align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", dax_region->align);
}
static struct device_attribute dev_attr_region_align =
		__ATTR(align, 0400, region_align_show, NULL);
#define for_each_dax_region_resource(dax_region, res) \
	for (res = (dax_region)->res.child; res; res = res->sibling)

static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
{
	resource_size_t size = resource_size(&dax_region->res);
	struct resource *res;

	device_lock_assert(dax_region->dev);

	for_each_dax_region_resource(dax_region, res)
		size -= resource_size(res);
	return size;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	unsigned long long size;

	device_lock(dev);
	size = dax_region_avail_size(dax_region);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(available_size);
static ssize_t seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct device *seed;
	ssize_t rc;

	if (is_static(dax_region))
		return -EINVAL;

	device_lock(dev);
	seed = dax_region->seed;
	rc = sprintf(buf, "%s\n", seed ? dev_name(seed) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(seed);

static ssize_t create_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct device *youngest;
	ssize_t rc;

	if (is_static(dax_region))
		return -EINVAL;

	device_lock(dev);
	youngest = dax_region->youngest;
	rc = sprintf(buf, "%s\n", youngest ? dev_name(youngest) : "");
	device_unlock(dev);

	return rc;
}
static ssize_t create_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	unsigned long long avail;
	ssize_t rc;
	int val;

	if (is_static(dax_region))
		return -EINVAL;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	avail = dax_region_avail_size(dax_region);
	if (avail == 0)
		rc = -ENOSPC;
	else {
		struct dev_dax_data data = {
			.dax_region = dax_region,
			.size = 0,
			.id = -1,
		};
		struct dev_dax *dev_dax = devm_create_dev_dax(&data);

		if (IS_ERR(dev_dax))
			rc = PTR_ERR(dev_dax);
		else {
			/*
			 * In support of crafting multiple new devices
			 * simultaneously multiple seeds can be created,
			 * but only the first one that has not been
			 * successfully bound is tracked as the region
			 * seed.
			 */
			if (!dax_region->seed)
				dax_region->seed = &dev_dax->dev;
			dax_region->youngest = &dev_dax->dev;
			rc = len;
		}
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(create);
void kill_dev_dax(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);

	kill_dax(dax_dev);
	unmap_mapping_range(inode->i_mapping, 0, 0, 1);
}
EXPORT_SYMBOL_GPL(kill_dev_dax);
static void trim_dev_dax_range(struct dev_dax *dev_dax)
{
	int i = dev_dax->nr_range - 1;
	struct range *range = &dev_dax->ranges[i].range;
	struct dax_region *dax_region = dev_dax->region;

	device_lock_assert(dax_region->dev);
	dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
		(unsigned long long)range->start,
		(unsigned long long)range->end);

	__release_region(&dax_region->res, range->start, range_len(range));
	if (--dev_dax->nr_range == 0) {
		kfree(dev_dax->ranges);
		dev_dax->ranges = NULL;
	}
}

static void free_dev_dax_ranges(struct dev_dax *dev_dax)
{
	while (dev_dax->nr_range)
		trim_dev_dax_range(dev_dax);
}

static void unregister_dev_dax(void *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	dev_dbg(dev, "%s\n", __func__);

	kill_dev_dax(dev_dax);
	free_dev_dax_ranges(dev_dax);
	device_del(dev);
	put_device(dev);
}
/* a return value >= 0 indicates this invocation invalidated the id */
static int __free_dev_dax_id(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	int rc = dev_dax->id;

	device_lock_assert(dev);

	if (is_static(dax_region) || dev_dax->id < 0)
		return -1;
	ida_free(&dax_region->ida, dev_dax->id);
	dev_dax->id = -1;
	return rc;
}

static int free_dev_dax_id(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	int rc;

	device_lock(dev);
	rc = __free_dev_dax_id(dev_dax);
	device_unlock(dev);
	return rc;
}
static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);
	struct dev_dax *dev_dax;
	struct device *victim;
	bool do_del = false;
	int rc;

	if (is_static(dax_region))
		return -EINVAL;

	victim = device_find_child_by_name(dax_region->dev, buf);
	if (!victim)
		return -ENXIO;

	device_lock(dev);
	device_lock(victim);
	dev_dax = to_dev_dax(victim);
	if (victim->driver || dev_dax_size(dev_dax))
		rc = -EBUSY;
	else {
		/*
		 * Invalidate the device so it does not become active
		 * again, but always preserve device-id-0 so that
		 * /sys/bus/dax/ is guaranteed to be populated while any
		 * dax_region is registered.
		 */
		if (dev_dax->id > 0) {
			do_del = __free_dev_dax_id(dev_dax) >= 0;
			rc = len;
			if (dax_region->seed == victim)
				dax_region->seed = NULL;
			if (dax_region->youngest == victim)
				dax_region->youngest = NULL;
		} else
			rc = -EBUSY;
	}
	device_unlock(victim);

	/* won the race to invalidate the device, clean it up */
	if (do_del)
		devm_release_action(dev, unregister_dev_dax, victim);
	else
		put_device(victim);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_WO(delete);
static umode_t dax_region_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct dax_region *dax_region = dev_get_drvdata(dev);

	if (is_static(dax_region))
		if (a == &dev_attr_available_size.attr
				|| a == &dev_attr_create.attr
				|| a == &dev_attr_seed.attr
				|| a == &dev_attr_delete.attr)
			return 0;
	return a->mode;
}

static struct attribute *dax_region_attributes[] = {
	&dev_attr_available_size.attr,
	&dev_attr_region_size.attr,
	&dev_attr_region_align.attr,
	&dev_attr_create.attr,
	&dev_attr_seed.attr,
	&dev_attr_delete.attr,
	&dev_attr_id.attr,
	NULL,
};

static const struct attribute_group dax_region_attribute_group = {
	.name = "dax_region",
	.attrs = dax_region_attributes,
	.is_visible = dax_region_visible,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
	&dax_region_attribute_group,
	NULL,
};
static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

static void dax_region_unregister(void *region)
{
	struct dax_region *dax_region = region;

	sysfs_remove_groups(&dax_region->dev->kobj,
			dax_region_attribute_groups);
	dax_region_put(dax_region);
}
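/*
 * alloc_dax_region() stashes the region in the parent's drvdata, seeds the
 * region resource from @range, and ties the region lifetime to the parent
 * device via a devm action that removes the sysfs group and drops the kref.
 */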
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct range *range, int target_node, unsigned int align,
		unsigned long flags)
{
	struct dax_region *dax_region;

	/*
	 * The DAX core assumes that it can store its private data in
	 * parent->driver_data. This WARN is a reminder / safeguard for
	 * developers of device-dax drivers.
	 */
	if (dev_get_drvdata(parent)) {
		dev_WARN(parent, "dax core failed to setup private data\n");
		return NULL;
	}

	if (!IS_ALIGNED(range->start, align)
			|| !IS_ALIGNED(range_len(range), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	dev_set_drvdata(parent, dax_region);
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->target_node = target_node;
	ida_init(&dax_region->ida);
	dax_region->res = (struct resource) {
		.start = range->start,
		.end = range->end,
		.flags = IORESOURCE_MEM | flags,
	};

	if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
		kfree(dax_region);
		return NULL;
	}

	kref_get(&dax_region->kref);
	if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
		return NULL;
	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
static void dax_mapping_release(struct device *dev)
{
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct dev_dax *dev_dax = to_dev_dax(dev->parent);

	ida_free(&dev_dax->ida, mapping->id);
	kfree(mapping);
}

static void unregister_dax_mapping(void *data)
{
	struct device *dev = data;
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct dev_dax *dev_dax = to_dev_dax(dev->parent);
	struct dax_region *dax_region = dev_dax->region;

	dev_dbg(dev, "%s\n", __func__);

	device_lock_assert(dax_region->dev);

	dev_dax->ranges[mapping->range_id].mapping = NULL;
	mapping->range_id = -1;

	device_del(dev);
	put_device(dev);
}
static struct dev_dax_range *get_dax_range(struct device *dev)
{
	struct dax_mapping *mapping = to_dax_mapping(dev);
	struct dev_dax *dev_dax = to_dev_dax(dev->parent);
	struct dax_region *dax_region = dev_dax->region;

	device_lock(dax_region->dev);
	if (mapping->range_id < 0) {
		device_unlock(dax_region->dev);
		return NULL;
	}

	return &dev_dax->ranges[mapping->range_id];
}

static void put_dax_range(struct dev_dax_range *dax_range)
{
	struct dax_mapping *mapping = dax_range->mapping;
	struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent);
	struct dax_region *dax_region = dev_dax->region;

	device_unlock(dax_region->dev);
}
static ssize_t start_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sprintf(buf, "%#llx\n", dax_range->range.start);
	put_dax_range(dax_range);

	return rc;
}
static DEVICE_ATTR(start, 0400, start_show, NULL);

static ssize_t end_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sprintf(buf, "%#llx\n", dax_range->range.end);
	put_dax_range(dax_range);

	return rc;
}
static DEVICE_ATTR(end, 0400, end_show, NULL);

static ssize_t pgoff_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax_range *dax_range;
	ssize_t rc;

	dax_range = get_dax_range(dev);
	if (!dax_range)
		return -ENXIO;
	rc = sprintf(buf, "%#lx\n", dax_range->pgoff);
	put_dax_range(dax_range);

	return rc;
}
static DEVICE_ATTR(page_offset, 0400, pgoff_show, NULL);

static struct attribute *dax_mapping_attributes[] = {
	&dev_attr_start.attr,
	&dev_attr_end.attr,
	&dev_attr_page_offset.attr,
	NULL,
};

static const struct attribute_group dax_mapping_attribute_group = {
	.attrs = dax_mapping_attributes,
};

static const struct attribute_group *dax_mapping_attribute_groups[] = {
	&dax_mapping_attribute_group,
	NULL,
};

static struct device_type dax_mapping_type = {
	.release = dax_mapping_release,
	.groups = dax_mapping_attribute_groups,
};
static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
{
	struct dax_region *dax_region = dev_dax->region;
	struct dax_mapping *mapping;
	struct device *dev;
	int rc;

	device_lock_assert(dax_region->dev);

	if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
				"region disabled\n"))
		return -ENXIO;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;
	mapping->range_id = range_id;
	mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL);
	if (mapping->id < 0) {
		kfree(mapping);
		return -ENOMEM;
	}
	dev_dax->ranges[range_id].mapping = mapping;
	dev = &mapping->dev;
	device_initialize(dev);
	dev->parent = &dev_dax->dev;
	dev->type = &dax_mapping_type;
	dev_set_name(dev, "mapping%d", mapping->id);
	rc = device_add(dev);
	if (rc) {
		put_device(dev);
		return rc;
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_mapping,
			dev);
	if (rc)
		return rc;
	return 0;
}
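/*
 * alloc_dev_dax_range() carves @size bytes out of the region resource
 * starting at @start and appends the result to dev_dax->ranges[]; a 0-size
 * request is only valid as the first (seed) allocation.
 */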
static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
		resource_size_t size)
{
	struct dax_region *dax_region = dev_dax->region;
	struct resource *res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	struct dev_dax_range *ranges;
	unsigned long pgoff = 0;
	struct resource *alloc;
	int i, rc;

	device_lock_assert(dax_region->dev);

	/* handle the seed alloc special case */
	if (!size) {
		if (dev_WARN_ONCE(dev, dev_dax->nr_range,
					"0-size allocation must be first\n"))
			return -EBUSY;
		/* nr_range == 0 is elsewhere special cased as 0-size device */
		return 0;
	}

	alloc = __request_region(res, start, size, dev_name(dev), 0);
	if (!alloc)
		return -ENOMEM;

	ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
			* (dev_dax->nr_range + 1), GFP_KERNEL);
	if (!ranges) {
		__release_region(res, alloc->start, resource_size(alloc));
		return -ENOMEM;
	}

	for (i = 0; i < dev_dax->nr_range; i++)
		pgoff += PHYS_PFN(range_len(&ranges[i].range));
	dev_dax->ranges = ranges;
	ranges[dev_dax->nr_range++] = (struct dev_dax_range) {
		.pgoff = pgoff,
		.range = {
			.start = alloc->start,
			.end = alloc->end,
		},
	};

	dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
			&alloc->start, &alloc->end);
	/*
	 * A dev_dax instance must be registered before mapping device
	 * children can be added. Defer to devm_create_dev_dax() to add
	 * the initial mapping device.
	 */
	if (!device_is_registered(&dev_dax->dev))
		return 0;

	rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
	if (rc)
		trim_dev_dax_range(dev_dax);

	return rc;
}
static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
{
	int last_range = dev_dax->nr_range - 1;
	struct dev_dax_range *dax_range = &dev_dax->ranges[last_range];
	struct dax_region *dax_region = dev_dax->region;
	bool is_shrink = resource_size(res) > size;
	struct range *range = &dax_range->range;
	struct device *dev = &dev_dax->dev;
	int rc;

	device_lock_assert(dax_region->dev);

	if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n"))
		return -EINVAL;

	rc = adjust_resource(res, range->start, size);
	if (rc)
		return rc;

	*range = (struct range) {
		.start = range->start,
		.end = range->start + size - 1,
	};

	dev_dbg(dev, "%s range[%d]: %#llx:%#llx\n", is_shrink ? "shrink" : "extend",
			last_range, (unsigned long long) range->start,
			(unsigned long long) range->end);

	return 0;
}
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	unsigned long long size;

	device_lock(dev);
	size = dev_dax_size(dev_dax);
	device_unlock(dev);

	return sprintf(buf, "%llu\n", size);
}

static bool alloc_is_aligned(struct dev_dax *dev_dax, resource_size_t size)
{
	/*
	 * The minimum mapping granularity for a device instance is a
	 * single subsection, unless the arch says otherwise.
	 */
	return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align, memremap_compat_align()));
}
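/*
 * Shrink walks the ranges from the end: whole trailing ranges are released
 * (and their mapping devices unregistered), and at most one final
 * partially-covered range is adjusted in place.
 */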
static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
{
	resource_size_t to_shrink = dev_dax_size(dev_dax) - size;
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	int i;

	for (i = dev_dax->nr_range - 1; i >= 0; i--) {
		struct range *range = &dev_dax->ranges[i].range;
		struct dax_mapping *mapping = dev_dax->ranges[i].mapping;
		struct resource *adjust = NULL, *res;
		resource_size_t shrink;

		shrink = min_t(u64, to_shrink, range_len(range));
		if (shrink >= range_len(range)) {
			devm_release_action(dax_region->dev,
					unregister_dax_mapping, &mapping->dev);
			trim_dev_dax_range(dev_dax);
			to_shrink -= shrink;
			if (!to_shrink)
				break;
			continue;
		}

		for_each_dax_region_resource(dax_region, res)
			if (strcmp(res->name, dev_name(dev)) == 0
					&& res->start == range->start) {
				adjust = res;
				break;
			}

		if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1,
					"failed to find matching resource\n"))
			return -ENXIO;
		return adjust_dev_dax_range(dev_dax, adjust, range_len(range)
				- shrink);
	}
	return 0;
}
/*
 * Only allow adjustments that preserve the relative pgoff of existing
 * allocations. I.e. the dev_dax->ranges array is ordered by increasing pgoff.
 */
static bool adjust_ok(struct dev_dax *dev_dax, struct resource *res)
{
	struct dev_dax_range *last;
	int i;

	if (dev_dax->nr_range == 0)
		return false;
	if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0)
		return false;
	last = &dev_dax->ranges[dev_dax->nr_range - 1];
	if (last->range.start != res->start || last->range.end != res->end)
		return false;
	for (i = 0; i < dev_dax->nr_range - 1; i++) {
		struct dev_dax_range *dax_range = &dev_dax->ranges[i];

		if (dax_range->pgoff > last->pgoff)
			return false;
	}

	return true;
}
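/*
 * Resize grows the device into free space in the region: by extending the
 * device's own trailing resource when adjust_ok() allows it, otherwise by
 * allocating a new range in the first suitable gap.
 */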
static ssize_t dev_dax_resize(struct dax_region *dax_region,
		struct dev_dax *dev_dax, resource_size_t size)
{
	resource_size_t avail = dax_region_avail_size(dax_region), to_alloc;
	resource_size_t dev_size = dev_dax_size(dev_dax);
	struct resource *region_res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	struct resource *res, *first;
	resource_size_t alloc = 0;
	int rc;

	if (dev->driver)
		return -EBUSY;
	if (size == dev_size)
		return 0;
	if (size > dev_size && size - dev_size > avail)
		return -ENOSPC;
	if (size < dev_size)
		return dev_dax_shrink(dev_dax, size);

	to_alloc = size - dev_size;
	if (dev_WARN_ONCE(dev, !alloc_is_aligned(dev_dax, to_alloc),
			"resize of %pa misaligned\n", &to_alloc))
		return -ENXIO;

	/*
	 * Expand the device into the unused portion of the region. This
	 * may involve adjusting the end of an existing resource, or
	 * allocating a new resource.
	 */
	first = region_res->child;
	if (!first)
		return alloc_dev_dax_range(dev_dax, dax_region->res.start,
				to_alloc);

	rc = -ENOSPC;
	for (res = first; res; res = res->sibling) {
		struct resource *next = res->sibling;

		/* space at the beginning of the region */
		if (res == first && res->start > dax_region->res.start) {
			alloc = min(res->start - dax_region->res.start, to_alloc);
			rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc);
			break;
		}

		alloc = 0;
		/* space between allocations */
		if (next && next->start > res->end + 1)
			alloc = min(next->start - (res->end + 1), to_alloc);

		/* space at the end of the region */
		if (!alloc && !next && res->end < region_res->end)
			alloc = min(region_res->end - res->end, to_alloc);

		if (!alloc)
			continue;

		if (adjust_ok(dev_dax, res)) {
			rc = adjust_dev_dax_range(dev_dax, res, resource_size(res) + alloc);
			break;
		}
		rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc);
		break;
	}
	return rc;
}
static ssize_t size_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	ssize_t rc;
	unsigned long long val;
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	if (!alloc_is_aligned(dev_dax, val)) {
		dev_dbg(dev, "%s: size: %lld misaligned\n", __func__, val);
		return -EINVAL;
	}

	device_lock(dax_region->dev);
	if (!dax_region->dev->driver) {
		device_unlock(dax_region->dev);
		return -ENXIO;
	}
	device_lock(dev);
	rc = dev_dax_resize(dax_region, dev_dax, val);
	device_unlock(dev);
	device_unlock(dax_region->dev);

	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(size);
static ssize_t range_parse(const char *opt, size_t len, struct range *range)
{
	unsigned long long addr = 0;
	char *start, *end, *str;
	ssize_t rc = -EINVAL;

	str = kstrdup(opt, GFP_KERNEL);
	if (!str)
		return rc;

	end = str;
	start = strsep(&end, "-");
	if (!start || !end)
		goto err;

	rc = kstrtoull(start, 16, &addr);
	if (rc)
		goto err;
	range->start = addr;

	rc = kstrtoull(end, 16, &addr);
	if (rc)
		goto err;
	range->end = addr;

err:
	kfree(str);
	return rc;
}
static ssize_t mapping_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	size_t to_alloc;
	struct range r;
	ssize_t rc;

	rc = range_parse(buf, len, &r);
	if (rc)
		return rc;

	rc = -ENXIO;
	device_lock(dax_region->dev);
	if (!dax_region->dev->driver) {
		device_unlock(dax_region->dev);
		return rc;
	}
	device_lock(dev);

	to_alloc = range_len(&r);
	if (alloc_is_aligned(dev_dax, to_alloc))
		rc = alloc_dev_dax_range(dev_dax, r.start, to_alloc);
	device_unlock(dev);
	device_unlock(dax_region->dev);

	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_WO(mapping);
static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sprintf(buf, "%d\n", dev_dax->align);
}

static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	int i;

	for (i = 0; i < dev_dax->nr_range; i++) {
		size_t len = range_len(&dev_dax->ranges[i].range);

		if (!alloc_is_aligned(dev_dax, len)) {
			dev_dbg(dev, "%s: align %u invalid for range %d\n",
				__func__, dev_dax->align, i);
			return -EINVAL;
		}
	}

	return 0;
}
static ssize_t align_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	unsigned long val, align_save;
	ssize_t rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return -ENXIO;

	if (!dax_align_valid(val))
		return -EINVAL;

	device_lock(dax_region->dev);
	if (!dax_region->dev->driver) {
		device_unlock(dax_region->dev);
		return -ENXIO;
	}

	device_lock(dev);
	if (dev->driver) {
		rc = -EBUSY;
		goto out_unlock;
	}

	align_save = dev_dax->align;
	dev_dax->align = val;
	rc = dev_dax_validate_align(dev_dax);
	if (rc)
		dev_dax->align = align_save;
out_unlock:
	device_unlock(dev);
	device_unlock(dax_region->dev);
	return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(align);
static int dev_dax_target_node(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->target_node;
}

static ssize_t target_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);

	return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
}
static DEVICE_ATTR_RO(target_node);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	unsigned long long start;

	if (dev_dax->nr_range < 1)
		start = dax_region->res.start;
	else
		start = dev_dax->ranges[0].range.start;

	return sprintf(buf, "%#llx\n", start);
}
static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	/*
	 * We only ever expect to handle device-dax instances, i.e. the
	 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
	 */
	return sprintf(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t numa_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;

	if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
		return 0;
	if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
		return 0;
	if (a == &dev_attr_mapping.attr && is_static(dax_region))
		return 0;
	if ((a == &dev_attr_align.attr ||
	     a == &dev_attr_size.attr) && is_static(dax_region))
		return 0444;
	return a->mode;
}
static struct attribute *dev_dax_attributes[] = {
	&dev_attr_modalias.attr,
	&dev_attr_size.attr,
	&dev_attr_mapping.attr,
	&dev_attr_target_node.attr,
	&dev_attr_align.attr,
	&dev_attr_resource.attr,
	&dev_attr_numa_node.attr,
	NULL,
};

static const struct attribute_group dev_dax_attribute_group = {
	.attrs = dev_dax_attributes,
	.is_visible = dev_dax_visible,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dev_dax_attribute_group,
	NULL,
};

static void dev_dax_release(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	struct dax_device *dax_dev = dev_dax->dax_dev;

	put_dax(dax_dev);
	free_dev_dax_id(dev_dax);
	dax_region_put(dax_region);
	kfree(dev_dax->pgmap);
	kfree(dev_dax);
}

static const struct device_type dev_dax_type = {
	.release = dev_dax_release,
	.groups = dax_attribute_groups,
};
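/*
 * devm_create_dev_dax() builds a dev_dax instance for @data->dax_region: it
 * assigns a static or ida-allocated id, reserves the initial range,
 * allocates the backing dax_device, and registers the device plus its first
 * mapping child under the region's devm lifetime.
 */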
struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
{
	struct dax_region *dax_region = data->dax_region;
	struct device *parent = dax_region->dev;
	struct dax_device *dax_dev;
	struct dev_dax *dev_dax;
	struct inode *inode;
	struct device *dev;
	int rc;

	dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL);
	if (!dev_dax)
		return ERR_PTR(-ENOMEM);

	if (is_static(dax_region)) {
		if (dev_WARN_ONCE(parent, data->id < 0,
				"dynamic id specified to static region\n")) {
			rc = -EINVAL;
			goto err_id;
		}

		dev_dax->id = data->id;
	} else {
		if (dev_WARN_ONCE(parent, data->id >= 0,
				"static id specified to dynamic region\n")) {
			rc = -EINVAL;
			goto err_id;
		}

		rc = ida_alloc(&dax_region->ida, GFP_KERNEL);
		if (rc < 0)
			goto err_id;
		dev_dax->id = rc;
	}

	dev_dax->region = dax_region;
	dev = &dev_dax->dev;
	device_initialize(dev);
	dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);

	rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size);
	if (rc)
		goto err_range;

	if (data->pgmap) {
		dev_WARN_ONCE(parent, !is_static(dax_region),
			"custom dev_pagemap requires a static dax_region\n");

		dev_dax->pgmap = kmemdup(data->pgmap,
				sizeof(struct dev_pagemap), GFP_KERNEL);
		if (!dev_dax->pgmap) {
			rc = -ENOMEM;
			goto err_pgmap;
		}
	}

	/*
	 * No 'host' or dax_operations since there is no access to this
	 * device outside of mmap of the resulting character device.
	 */
	dax_dev = alloc_dax(dev_dax, NULL, NULL, DAXDEV_F_SYNC);
	if (IS_ERR(dax_dev)) {
		rc = PTR_ERR(dax_dev);
		goto err_alloc_dax;
	}

	/* a device_dax instance is dead while the driver is not attached */
	kill_dax(dax_dev);

	dev_dax->dax_dev = dax_dev;
	dev_dax->target_node = dax_region->target_node;
	dev_dax->align = dax_region->align;
	ida_init(&dev_dax->ida);
	kref_get(&dax_region->kref);

	inode = dax_inode(dax_dev);
	dev->devt = inode->i_rdev;
	if (data->subsys == DEV_DAX_BUS)
		dev->bus = &dax_bus_type;
	else
		dev->class = dax_class;
	dev->parent = parent;
	dev->type = &dev_dax_type;

	rc = device_add(dev);
	if (rc) {
		kill_dev_dax(dev_dax);
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
	if (rc)
		return ERR_PTR(rc);

	/* register mapping device for the initial allocation range */
	if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) {
		rc = devm_register_dax_mapping(dev_dax, 0);
		if (rc)
			return ERR_PTR(rc);
	}

	return dev_dax;

err_alloc_dax:
	kfree(dev_dax->pgmap);
err_pgmap:
	free_dev_dax_ranges(dev_dax);
err_range:
	free_dev_dax_id(dev_dax);
err_id:
	kfree(dev_dax);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);
static int match_always_count;

int __dax_driver_register(struct dax_device_driver *dax_drv,
		struct module *module, const char *mod_name)
{
	struct device_driver *drv = &dax_drv->drv;
	int rc = 0;

	/*
	 * dax_bus_probe() calls dax_drv->probe() unconditionally.
	 * So better be safe than sorry and ensure it is provided.
	 */
	if (!dax_drv->probe)
		return -EINVAL;

	INIT_LIST_HEAD(&dax_drv->ids);
	drv->owner = module;
	drv->name = mod_name;
	drv->mod_name = mod_name;
	drv->bus = &dax_bus_type;

	/* there can only be one default driver */
	mutex_lock(&dax_bus_lock);
	match_always_count += dax_drv->match_always;
	if (match_always_count > 1) {
		match_always_count--;
		WARN_ON(1);
		rc = -EINVAL;
	}
	mutex_unlock(&dax_bus_lock);
	if (rc)
		return rc;

	rc = driver_register(drv);
	if (rc && dax_drv->match_always) {
		mutex_lock(&dax_bus_lock);
		match_always_count -= dax_drv->match_always;
		mutex_unlock(&dax_bus_lock);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(__dax_driver_register);
void dax_driver_unregister(struct dax_device_driver *dax_drv)
{
	struct device_driver *drv = &dax_drv->drv;
	struct dax_id *dax_id, *_id;

	mutex_lock(&dax_bus_lock);
	match_always_count -= dax_drv->match_always;
	list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) {
		list_del(&dax_id->list);
		kfree(dax_id);
	}
	mutex_unlock(&dax_bus_lock);
	driver_unregister(drv);
}
EXPORT_SYMBOL_GPL(dax_driver_unregister);
int __init dax_bus_init(void)
{
	int rc;

	if (IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)) {
		dax_class = class_create(THIS_MODULE, "dax");
		if (IS_ERR(dax_class))
			return PTR_ERR(dax_class);
	}

	rc = bus_register(&dax_bus_type);
	if (rc)
		class_destroy(dax_class);
	return rc;
}

void __exit dax_bus_exit(void)
{
	bus_unregister(&dax_bus_type);
	class_destroy(dax_class);
}