@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/libnvdimm.h>
@@ -23,6 +15,7 @@
 #include <linux/ndctl.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/cpu.h>
 #include <linux/fs.h>
 #include <linux/io.h>
 #include <linux/mm.h>
@@ -33,7 +26,7 @@
 
 int nvdimm_major;
 static int nvdimm_bus_major;
-static struct class *nd_class;
+struct class *nd_class;
 static DEFINE_IDA(nd_ida);
 
 static int to_nd_device_type(struct device *dev)
@@ -54,12 +47,6 @@
 
 static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
-	/*
-	 * Ensure that region devices always have their numa node set as
-	 * early as possible.
-	 */
-	if (is_nd_region(dev))
-		set_dev_node(dev, to_nd_region(dev)->numa_node);
 	return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
 			to_nd_device_type(dev));
 }
@@ -104,11 +91,13 @@
 			dev->driver->name, dev_name(dev));
 
 	nvdimm_bus_probe_start(nvdimm_bus);
+	debug_nvdimm_lock(dev);
 	rc = nd_drv->probe(dev);
-	if (rc == 0)
-		nd_region_probe_success(nvdimm_bus, dev);
-	else
-		nd_region_disable(nvdimm_bus, dev);
+	debug_nvdimm_unlock(dev);
+
+	if ((rc == 0 || rc == -EOPNOTSUPP) &&
+			dev->parent && is_nd_region(dev->parent))
+		nd_region_advance_seeds(to_nd_region(dev->parent), dev);
 	nvdimm_bus_probe_end(nvdimm_bus);
 
 	dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n", dev->driver->name,
@@ -126,9 +115,11 @@
 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
 	int rc = 0;
 
-	if (nd_drv->remove)
+	if (nd_drv->remove) {
+		debug_nvdimm_lock(dev);
 		rc = nd_drv->remove(dev);
-	nd_region_disable(nvdimm_bus, dev);
+		debug_nvdimm_unlock(dev);
+	}
 
 	dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
 			dev_name(dev), rc);
@@ -153,7 +144,7 @@
 
 void nd_device_notify(struct device *dev, enum nvdimm_event event)
 {
-	device_lock(dev);
+	nd_device_lock(dev);
 	if (dev->driver) {
 		struct nd_device_driver *nd_drv;
 
@@ -161,7 +152,7 @@
 		if (nd_drv->notify)
 			nd_drv->notify(dev, event);
 	}
-	device_unlock(dev);
+	nd_device_unlock(dev);
 }
 EXPORT_SYMBOL(nd_device_notify);
 
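Note: this hunk and the previous one convert nd_device_notify() from device_lock()/device_unlock() to nd_device_lock()/nd_device_unlock(), and the probe/remove paths above gain debug_nvdimm_lock()/debug_nvdimm_unlock(). Those helpers are defined outside this file, so the sketch below only illustrates the idea as inferred from their use here (the earlier un-static of nd_class points the same way): the real device lock is still taken, plus a lockdep-visible per-object annotation on debug builds so inverted lock orderings can be reported.

```c
/*
 * Sketch only -- not the in-tree definitions. The shape is inferred from
 * how the helpers are used in the hunks above.
 */
#include <linux/device.h>

static inline void debug_nvdimm_lock(struct device *dev)
{
	/* on CONFIG_PROVE_LOCKING builds: take a per-object, never-contended
	 * mutex (nested by object type) purely so lockdep can see it */
}

static inline void debug_nvdimm_unlock(struct device *dev)
{
	/* release the lockdep-tracking mutex */
}

static inline void nd_device_lock(struct device *dev)
{
	device_lock(dev);	/* the real serialization */
	debug_nvdimm_lock(dev);	/* lockdep annotation only */
}

static inline void nd_device_unlock(struct device *dev)
{
	debug_nvdimm_unlock(dev);
	device_unlock(dev);
}
```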
@@ -196,8 +187,8 @@
 	ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
 
 	/* make sure we are in the region */
-	if (ctx->phys < nd_region->ndr_start
-			|| (ctx->phys + ctx->cleared) > ndr_end)
+	if (ctx->phys < nd_region->ndr_start ||
+	    (ctx->phys + ctx->cleared - 1) > ndr_end)
 		return 0;
 
 	sector = (ctx->phys - nd_region->ndr_start) / 512;
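Note: the rewritten bounds check fixes an off-by-one. ndr_end is the inclusive last byte of the region, and a clear of `cleared` bytes starting at `phys` touches bytes `phys` through `phys + cleared - 1`, so that last byte is what must be compared against ndr_end; the old form rejected a clear that ends exactly on the region boundary. A stand-alone illustration with made-up values:

```c
/* Illustrative only; values are made up to show the boundary case. */
#include <stdbool.h>
#include <stdio.h>

static bool in_region_old(unsigned long long phys, unsigned long long cleared,
			  unsigned long long start, unsigned long long end)
{
	return !(phys < start || (phys + cleared) > end);	/* off by one */
}

static bool in_region_new(unsigned long long phys, unsigned long long cleared,
			  unsigned long long start, unsigned long long end)
{
	return !(phys < start || (phys + cleared - 1) > end);	/* last byte */
}

int main(void)
{
	/* region 0x1000..0x1fff (inclusive); clear the final 0x100 bytes */
	unsigned long long start = 0x1000, end = 0x1fff;

	printf("old: %d, new: %d\n",
	       in_region_old(0x1f00, 0x100, start, end),	/* 0: wrongly rejected */
	       in_region_new(0x1f00, 0x100, start, end));	/* 1: accepted */
	return 0;
}
```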
@@ -309,9 +300,14 @@
 	kfree(nvdimm_bus);
 }
 
-static bool is_nvdimm_bus(struct device *dev)
+static const struct device_type nvdimm_bus_dev_type = {
+	.release = nvdimm_bus_release,
+	.groups = nvdimm_bus_attribute_groups,
+};
+
+bool is_nvdimm_bus(struct device *dev)
 {
-	return dev->release == nvdimm_bus_release;
+	return dev->type == &nvdimm_bus_dev_type;
 }
 
 struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
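Note: identifying the bus device by a device_type instead of by its release function pointer is the standard driver-model idiom; it gives the release callback and default sysfs groups a single home and lets is_nvdimm_bus() be shared outside this file without exposing the release function. A generic sketch of the pattern (names are illustrative, not from the nvdimm code):

```c
/* Generic device_type identification pattern; names are illustrative. */
#include <linux/device.h>
#include <linux/slab.h>

static void example_release(struct device *dev)
{
	kfree(dev);
}

static const struct device_type example_dev_type = {
	.name = "example",
	.release = example_release,
	/* .groups can carry default sysfs attribute groups for the type */
};

static bool __maybe_unused is_example_device(struct device *dev)
{
	/* comparing the type pointer replaces comparing dev->release */
	return dev->type == &example_dev_type;
}
```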
@@ -337,6 +333,12 @@
 }
 EXPORT_SYMBOL_GPL(to_nvdimm_bus);
 
+struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm)
+{
+	return to_nvdimm_bus(nvdimm->dev.parent);
+}
+EXPORT_SYMBOL_GPL(nvdimm_to_bus);
+
 struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
 		struct nvdimm_bus_descriptor *nd_desc)
 {
@@ -350,15 +352,15 @@
 	INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
 	init_waitqueue_head(&nvdimm_bus->wait);
 	nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
-	mutex_init(&nvdimm_bus->reconfig_mutex);
-	badrange_init(&nvdimm_bus->badrange);
 	if (nvdimm_bus->id < 0) {
 		kfree(nvdimm_bus);
 		return NULL;
 	}
+	mutex_init(&nvdimm_bus->reconfig_mutex);
+	badrange_init(&nvdimm_bus->badrange);
 	nvdimm_bus->nd_desc = nd_desc;
 	nvdimm_bus->dev.parent = parent;
-	nvdimm_bus->dev.release = nvdimm_bus_release;
+	nvdimm_bus->dev.type = &nvdimm_bus_dev_type;
 	nvdimm_bus->dev.groups = nd_desc->attr_groups;
 	nvdimm_bus->dev.bus = &nvdimm_bus_type;
 	nvdimm_bus->dev.of_node = nd_desc->of_node;
@@ -393,9 +395,24 @@
 	 * i.e. remove classless children
 	 */
 	if (dev->class)
-		/* pass */;
-	else
-		nd_device_unregister(dev, ND_SYNC);
+		return 0;
+
+	if (is_nvdimm(dev)) {
+		struct nvdimm *nvdimm = to_nvdimm(dev);
+		bool dev_put = false;
+
+		/* We are shutting down. Make state frozen artificially. */
+		nvdimm_bus_lock(dev);
+		set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
+		if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
+			dev_put = true;
+		nvdimm_bus_unlock(dev);
+		cancel_delayed_work_sync(&nvdimm->dwork);
+		if (dev_put)
+			put_device(dev);
+	}
+	nd_device_unregister(dev, ND_SYNC);
+
 	return 0;
 }
 
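Note: the new teardown path freezes the DIMM security state and reaps any pending overwrite work before unregistering the child. The put_device() balances a reference that whoever scheduled the delayed work is expected to hold while NDD_WORK_PENDING is set; the sketch below shows that pairing in generic form (all names here are hypothetical, not the nvdimm security code):

```c
/* Hypothetical illustration of the flag/reference pairing used above. */
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/workqueue.h>

struct example_dimm {
	struct device dev;
	unsigned long flags;
	struct delayed_work dwork;
};
#define EXAMPLE_WORK_PENDING	0	/* bit number in ->flags */

static void example_schedule_work(struct example_dimm *dimm)
{
	if (!test_and_set_bit(EXAMPLE_WORK_PENDING, &dimm->flags)) {
		get_device(&dimm->dev);	/* reference owned by the pending work */
		queue_delayed_work(system_wq, &dimm->dwork, HZ);
	}
}

static void example_shutdown(struct example_dimm *dimm)
{
	bool dev_put = test_and_clear_bit(EXAMPLE_WORK_PENDING, &dimm->flags);

	cancel_delayed_work_sync(&dimm->dwork);
	if (dev_put)
		put_device(&dimm->dev);	/* balance example_schedule_work() */
}
```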
@@ -511,12 +528,26 @@
 {
 	if (!dev)
 		return;
+
+	/*
+	 * Ensure that region devices always have their NUMA node set as
+	 * early as possible. This way we are able to make certain that
+	 * any memory associated with the creation and the creation
+	 * itself of the region is associated with the correct node.
+	 */
+	if (is_nd_region(dev))
+		set_dev_node(dev, to_nd_region(dev)->numa_node);
+
 	dev->bus = &nvdimm_bus_type;
-	if (dev->parent)
+	if (dev->parent) {
 		get_device(dev->parent);
+		if (dev_to_node(dev) == NUMA_NO_NODE)
+			set_dev_node(dev, dev_to_node(dev->parent));
+	}
 	get_device(dev);
-	async_schedule_domain(nd_async_device_register, dev,
-			&nd_async_domain);
+
+	async_schedule_dev_domain(nd_async_device_register, dev,
+				  &nd_async_domain);
 }
 
 void nd_device_register(struct device *dev)
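Note: registration now stamps a NUMA node on every device before queueing the async work: regions get their own numa_node (moved here from the uevent handler), and any other child falls back to its parent's node, so allocations made during registration land on the right node. The switch from async_schedule_domain() to async_schedule_dev_domain() is what carries that into the async step; as I read include/linux/async.h, the _dev_ variant is essentially a node-aware schedule keyed off the device:

```c
/*
 * Paraphrase of the device-aware helper used above (check
 * include/linux/async.h in your tree for the real definition).
 */
#include <linux/async.h>
#include <linux/device.h>

static inline async_cookie_t
example_async_schedule_dev_domain(async_func_t func, struct device *dev,
				  struct async_domain *domain)
{
	/* run the callback on the device's NUMA node, with dev as its data */
	return async_schedule_node_domain(func, dev, dev_to_node(dev), domain);
}
```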
@@ -553,9 +584,9 @@
 	 * or otherwise let the async path handle it if the
 	 * unregistration was already queued.
 	 */
-	device_lock(dev);
+	nd_device_lock(dev);
 	killed = kill_device(dev);
-	device_unlock(dev);
+	nd_device_unlock(dev);
 
 	if (!killed)
 		return;
@@ -579,7 +610,7 @@
 	struct device_driver *drv = &nd_drv->drv;
 
 	if (!nd_drv->type) {
-		pr_debug("driver type bitmask not set (%pf)\n",
+		pr_debug("driver type bitmask not set (%ps)\n",
 				__builtin_return_address(0));
 		return -EINVAL;
 	}
@@ -597,7 +628,7 @@
 }
 EXPORT_SYMBOL(__nd_driver_register);
 
-int nvdimm_revalidate_disk(struct gendisk *disk)
+void nvdimm_check_and_set_ro(struct gendisk *disk)
 {
 	struct device *dev = disk_to_dev(disk)->parent;
 	struct nd_region *nd_region = to_nd_region(dev->parent);
@@ -608,16 +639,13 @@
 	 * read-only if the disk is already read-only.
 	 */
 	if (disk_ro || nd_region->ro == disk_ro)
-		return 0;
+		return;
 
 	dev_info(dev, "%s read-only, marking %s read-only\n",
 			dev_name(&nd_region->dev), disk->disk_name);
 	set_disk_ro(disk, 1);
-
-	return 0;
-
 }
-EXPORT_SYMBOL(nvdimm_revalidate_disk);
+EXPORT_SYMBOL(nvdimm_check_and_set_ro);
 
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
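Note: with the rename, the helper stops masquerading as a block-layer revalidate hook: it returns void and only ever flips a writable disk to read-only when its parent region is read-only. Callers are expected to invoke it directly once their gendisk exists; a hypothetical caller sketch (the function name below is made up, and the declaration stands in for the subsystem's private header):

```c
/* Hypothetical caller; exampled_disk_ready() is not a real driver function. */
#include <linux/genhd.h>

void nvdimm_check_and_set_ro(struct gendisk *disk);	/* from the nvdimm core */

static void exampled_disk_ready(struct gendisk *disk)
{
	/* call once add_disk()/device_add_disk() has published the disk */
	nvdimm_check_and_set_ro(disk);
}
```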
@@ -643,10 +671,9 @@
 /*
  * nd_device_attribute_group - generic attributes for all devices on an nd bus
  */
-struct attribute_group nd_device_attribute_group = {
+const struct attribute_group nd_device_attribute_group = {
 	.attrs = nd_device_attributes,
 };
-EXPORT_SYMBOL_GPL(nd_device_attribute_group);
 
 static ssize_t numa_node_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -655,15 +682,44 @@
 }
 static DEVICE_ATTR_RO(numa_node);
 
+static int nvdimm_dev_to_target_node(struct device *dev)
+{
+	struct device *parent = dev->parent;
+	struct nd_region *nd_region = NULL;
+
+	if (is_nd_region(dev))
+		nd_region = to_nd_region(dev);
+	else if (parent && is_nd_region(parent))
+		nd_region = to_nd_region(parent);
+
+	if (!nd_region)
+		return NUMA_NO_NODE;
+	return nd_region->target_node;
+}
+
+static ssize_t target_node_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", nvdimm_dev_to_target_node(dev));
+}
+static DEVICE_ATTR_RO(target_node);
+
 static struct attribute *nd_numa_attributes[] = {
 	&dev_attr_numa_node.attr,
+	&dev_attr_target_node.attr,
 	NULL,
 };
 
 static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
 		int n)
 {
+	struct device *dev = container_of(kobj, typeof(*dev), kobj);
+
 	if (!IS_ENABLED(CONFIG_NUMA))
+		return 0;
+
+	if (a == &dev_attr_target_node.attr &&
+			nvdimm_dev_to_target_node(dev) == NUMA_NO_NODE)
 		return 0;
 
 	return a->mode;
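Note: target_node is a new read-only attribute alongside numa_node: numa_node reports where the device itself lives, while target_node reports the node the region's memory is associated with when onlined, and the is_visible() hook hides it when no target node was provided. The container_of() on the kobject is the usual way to recover the struct device in an is_visible() callback, equivalent in spirit to the kobj_to_dev() helper:

```c
/* What the container_of() above amounts to (kobj_to_dev() exists for this). */
#include <linux/device.h>

static struct device *example_kobj_to_dev(struct kobject *kobj)
{
	/* a struct device embeds its kobject as the "kobj" member */
	return container_of(kobj, struct device, kobj);
}
```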
@@ -672,11 +728,10 @@
 /*
  * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
  */
-struct attribute_group nd_numa_attribute_group = {
+const struct attribute_group nd_numa_attribute_group = {
 	.attrs = nd_numa_attributes,
 	.is_visible = nd_numa_attr_visible,
 };
-EXPORT_SYMBOL_GPL(nd_numa_attribute_group);
 
 int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
 {
@@ -866,10 +921,10 @@
 		if (nvdimm_bus->probe_active == 0)
 			break;
 		nvdimm_bus_unlock(dev);
-		device_unlock(dev);
+		nd_device_unlock(dev);
 		wait_event(nvdimm_bus->wait,
 				nvdimm_bus->probe_active == 0);
-		device_lock(dev);
+		nd_device_lock(dev);
 		nvdimm_bus_lock(dev);
 	} while (true);
 }
@@ -928,7 +983,7 @@
 
 	/* ask the bus provider if it would like to block this request */
 	if (nd_desc->clear_to_send) {
-		int rc = nd_desc->clear_to_send(nd_desc, nvdimm, cmd);
+		int rc = nd_desc->clear_to_send(nd_desc, nvdimm, cmd, data);
 
 		if (rc)
 			return rc;
@@ -979,9 +1034,25 @@
 		dimm_name = "bus";
 	}
 
+	/* Validate command family support against bus declared support */
 	if (cmd == ND_CMD_CALL) {
+		unsigned long *mask;
+
 		if (copy_from_user(&pkg, p, sizeof(pkg)))
 			return -EFAULT;
+
+		if (nvdimm) {
+			if (pkg.nd_family > NVDIMM_FAMILY_MAX)
+				return -EINVAL;
+			mask = &nd_desc->dimm_family_mask;
+		} else {
+			if (pkg.nd_family > NVDIMM_BUS_FAMILY_MAX)
+				return -EINVAL;
+			mask = &nd_desc->bus_family_mask;
+		}
+
+		if (!test_bit(pkg.nd_family, mask))
+			return -EINVAL;
 	}
 
 	if (!desc ||
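Note: ND_CMD_CALL passthrough packages are now screened against a family mask the bus provider declares up front: per-DIMM families are checked against dimm_family_mask, bus-scoped families against bus_family_mask, and anything the provider did not opt into is rejected with -EINVAL. The provider side implied by this check would look roughly like the sketch below (the specific family constants are examples only):

```c
/* Sketch of the provider side; the family choices here are examples only. */
#include <linux/bitops.h>
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>

static void example_declare_families(struct nvdimm_bus_descriptor *nd_desc)
{
	/* passthrough families this bus provider is willing to service */
	set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
	set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask);
}
```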
@@ -1087,7 +1158,7 @@
 		goto out;
 	}
 
-	device_lock(dev);
+	nd_device_lock(dev);
 	nvdimm_bus_lock(dev);
 	rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
 	if (rc)
@@ -1109,7 +1180,7 @@
 
  out_unlock:
 	nvdimm_bus_unlock(dev);
-	device_unlock(dev);
+	nd_device_unlock(dev);
  out:
 	kfree(in_env);
 	kfree(out_env);
@@ -1203,7 +1274,7 @@
 	.owner = THIS_MODULE,
 	.open = nd_open,
 	.unlocked_ioctl = bus_ioctl,
-	.compat_ioctl = bus_ioctl,
+	.compat_ioctl = compat_ptr_ioctl,
 	.llseek = noop_llseek,
 };
 
1209 | 1280 | |
---|
.. | .. |
---|
1211 | 1282 | .owner = THIS_MODULE, |
---|
1212 | 1283 | .open = nd_open, |
---|
1213 | 1284 | .unlocked_ioctl = dimm_ioctl, |
---|
1214 | | - .compat_ioctl = dimm_ioctl, |
---|
| 1285 | + .compat_ioctl = compat_ptr_ioctl, |
---|
1215 | 1286 | .llseek = noop_llseek, |
---|
1216 | 1287 | }; |
---|
1217 | 1288 | |
---|
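Note: both file_operations previously reused the native handler as .compat_ioctl. compat_ptr_ioctl is the generic helper for drivers whose ioctl arguments are all pointers (true for the nvdimm ioctls): it normalizes the 32-bit user pointer and then calls back into ->unlocked_ioctl. Roughly what it does (paraphrased; see fs/ioctl.c for the real definition):

```c
/* Paraphrase of the generic helper; not the in-tree definition. */
#include <linux/compat.h>
#include <linux/fs.h>

static long example_compat_ptr_ioctl(struct file *file, unsigned int cmd,
				     unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	/* convert the 32-bit pointer value, then reuse the native handler */
	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}
```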