forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/nvdimm/bus.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/libnvdimm.h>
@@ -23,6 +15,7 @@
 #include <linux/ndctl.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/cpu.h>
 #include <linux/fs.h>
 #include <linux/io.h>
 #include <linux/mm.h>
@@ -33,7 +26,7 @@

 int nvdimm_major;
 static int nvdimm_bus_major;
-static struct class *nd_class;
+struct class *nd_class;
 static DEFINE_IDA(nd_ida);

 static int to_nd_device_type(struct device *dev)
@@ -54,12 +47,6 @@

 static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
-	/*
-	 * Ensure that region devices always have their numa node set as
-	 * early as possible.
-	 */
-	if (is_nd_region(dev))
-		set_dev_node(dev, to_nd_region(dev)->numa_node);
 	return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
 			to_nd_device_type(dev));
 }
@@ -104,11 +91,13 @@
 			dev->driver->name, dev_name(dev));

 	nvdimm_bus_probe_start(nvdimm_bus);
+	debug_nvdimm_lock(dev);
 	rc = nd_drv->probe(dev);
-	if (rc == 0)
-		nd_region_probe_success(nvdimm_bus, dev);
-	else
-		nd_region_disable(nvdimm_bus, dev);
+	debug_nvdimm_unlock(dev);
+
+	if ((rc == 0 || rc == -EOPNOTSUPP) &&
+			dev->parent && is_nd_region(dev->parent))
+		nd_region_advance_seeds(to_nd_region(dev->parent), dev);
 	nvdimm_bus_probe_end(nvdimm_bus);

 	dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n", dev->driver->name,
@@ -126,9 +115,11 @@
 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
 	int rc = 0;

-	if (nd_drv->remove)
+	if (nd_drv->remove) {
+		debug_nvdimm_lock(dev);
 		rc = nd_drv->remove(dev);
-	nd_region_disable(nvdimm_bus, dev);
+		debug_nvdimm_unlock(dev);
+	}

 	dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
 			dev_name(dev), rc);
@@ -153,7 +144,7 @@

 void nd_device_notify(struct device *dev, enum nvdimm_event event)
 {
-	device_lock(dev);
+	nd_device_lock(dev);
 	if (dev->driver) {
 		struct nd_device_driver *nd_drv;

@@ -161,7 +152,7 @@
 		if (nd_drv->notify)
 			nd_drv->notify(dev, event);
 	}
-	device_unlock(dev);
+	nd_device_unlock(dev);
 }
 EXPORT_SYMBOL(nd_device_notify);

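Note on the locking change: device_lock()/device_unlock() pairs throughout this file become nd_device_lock()/nd_device_unlock(), and the probe/remove paths additionally bracket the driver callback with debug_nvdimm_lock()/debug_nvdimm_unlock(). Those helpers are defined in drivers/nvdimm/nd.h, which is not part of this diff; the sketch below only illustrates the general upstream pattern they follow, wrapping device_lock() with a lockdep-visible companion mutex so lock ordering against the subsystem locks can be validated. The nd_lock_class() name is a placeholder, not a real symbol, and making nd_class non-static above presumably serves the same header-side classification. A minimal sketch, assuming that pattern:

	/* Sketch only; see drivers/nvdimm/nd.h for the real definitions. */
	static inline void debug_nvdimm_lock(struct device *dev)
	{
		/* lockdep-visible shadow of device_lock() for nvdimm devices */
		mutex_lock_nested(&dev->lockdep_mutex, nd_lock_class(dev));
	}

	static inline void debug_nvdimm_unlock(struct device *dev)
	{
		mutex_unlock(&dev->lockdep_mutex);
	}

	static inline void nd_device_lock(struct device *dev)
	{
		device_lock(dev);
		debug_nvdimm_lock(dev);
	}

	static inline void nd_device_unlock(struct device *dev)
	{
		debug_nvdimm_unlock(dev);
		device_unlock(dev);
	}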
@@ -196,8 +187,8 @@
 	ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;

 	/* make sure we are in the region */
-	if (ctx->phys < nd_region->ndr_start
-			|| (ctx->phys + ctx->cleared) > ndr_end)
+	if (ctx->phys < nd_region->ndr_start ||
+	    (ctx->phys + ctx->cleared - 1) > ndr_end)
 		return 0;

 	sector = (ctx->phys - nd_region->ndr_start) / 512;
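The reworked bounds check fixes an off-by-one: ctx->cleared is a byte count, so the last cleared byte sits at ctx->phys + ctx->cleared - 1, and comparing the exclusive end against the inclusive ndr_end wrongly rejected clears that run exactly to the end of the region. A small illustration with made-up values:

	/* Minimal illustration of the off-by-one; the numbers are invented. */
	static bool in_region_old(u64 phys, u64 cleared, u64 ndr_start, u64 ndr_end)
	{
		return !(phys < ndr_start || (phys + cleared) > ndr_end);
	}

	static bool in_region_new(u64 phys, u64 cleared, u64 ndr_start, u64 ndr_end)
	{
		return !(phys < ndr_start || (phys + cleared - 1) > ndr_end);
	}

	/*
	 * For ndr_start = 0x1000 and ndr_end = 0x1fff (a 4 KiB region), a
	 * clear of 0x100 bytes at phys = 0x1f00 ends exactly at the last
	 * byte of the region:
	 *   in_region_old() -> false  (0x2000 > 0x1fff, clear wrongly skipped)
	 *   in_region_new() -> true   (0x1fff <= 0x1fff, clear accounted)
	 */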
@@ -309,9 +300,14 @@
 	kfree(nvdimm_bus);
 }

-static bool is_nvdimm_bus(struct device *dev)
+static const struct device_type nvdimm_bus_dev_type = {
+	.release = nvdimm_bus_release,
+	.groups = nvdimm_bus_attribute_groups,
+};
+
+bool is_nvdimm_bus(struct device *dev)
 {
-	return dev->release == nvdimm_bus_release;
+	return dev->type == &nvdimm_bus_dev_type;
 }

 struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
@@ -337,6 +333,12 @@
 }
 EXPORT_SYMBOL_GPL(to_nvdimm_bus);

+struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm)
+{
+	return to_nvdimm_bus(nvdimm->dev.parent);
+}
+EXPORT_SYMBOL_GPL(nvdimm_to_bus);
+
 struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
 		struct nvdimm_bus_descriptor *nd_desc)
 {
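nvdimm_to_bus() is a small convenience for code that holds an nvdimm and needs the owning bus (its descriptor, locks or id) without open-coding the parent walk; it relies only on nvdimm->dev.parent being the bus device. A hypothetical caller might look like the sketch below (example_dimm_op() is illustrative, not an existing function):

	/* Hypothetical caller: reach the bus provider from a DIMM device. */
	static int example_dimm_op(struct nvdimm *nvdimm)
	{
		struct nvdimm_bus *nvdimm_bus = nvdimm_to_bus(nvdimm);
		struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

		nvdimm_bus_lock(&nvdimm->dev);
		/* ... issue a provider callback through nd_desc ... */
		nvdimm_bus_unlock(&nvdimm->dev);
		return 0;
	}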
@@ -350,15 +352,15 @@
 	INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
 	init_waitqueue_head(&nvdimm_bus->wait);
 	nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
-	mutex_init(&nvdimm_bus->reconfig_mutex);
-	badrange_init(&nvdimm_bus->badrange);
 	if (nvdimm_bus->id < 0) {
 		kfree(nvdimm_bus);
 		return NULL;
 	}
+	mutex_init(&nvdimm_bus->reconfig_mutex);
+	badrange_init(&nvdimm_bus->badrange);
 	nvdimm_bus->nd_desc = nd_desc;
 	nvdimm_bus->dev.parent = parent;
-	nvdimm_bus->dev.release = nvdimm_bus_release;
+	nvdimm_bus->dev.type = &nvdimm_bus_dev_type;
 	nvdimm_bus->dev.groups = nd_desc->attr_groups;
 	nvdimm_bus->dev.bus = &nvdimm_bus_type;
 	nvdimm_bus->dev.of_node = nd_desc->of_node;
@@ -393,9 +395,24 @@
 	 * i.e. remove classless children
 	 */
 	if (dev->class)
-		/* pass */;
-	else
-		nd_device_unregister(dev, ND_SYNC);
+		return 0;
+
+	if (is_nvdimm(dev)) {
+		struct nvdimm *nvdimm = to_nvdimm(dev);
+		bool dev_put = false;
+
+		/* We are shutting down. Make state frozen artificially. */
+		nvdimm_bus_lock(dev);
+		set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
+		if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
+			dev_put = true;
+		nvdimm_bus_unlock(dev);
+		cancel_delayed_work_sync(&nvdimm->dwork);
+		if (dev_put)
+			put_device(dev);
+	}
+	nd_device_unregister(dev, ND_SYNC);
+
 	return 0;
 }

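The shutdown branch assumes the reference-counting convention around nvdimm->dwork: whoever queues the delayed security work pins the device and sets NDD_WORK_PENDING, so teardown clears the bit, cancels the work and drops that reference exactly once. A rough sketch of the queueing side, for context only (the real code lives in drivers/nvdimm/security.c and may differ in detail):

	/* Illustrative queueing side of the NDD_WORK_PENDING protocol. */
	static void example_queue_overwrite(struct nvdimm *nvdimm)
	{
		/* pin the device for as long as the delayed work is pending */
		get_device(&nvdimm->dev);
		set_bit(NDD_WORK_PENDING, &nvdimm->flags);
		queue_delayed_work(system_wq, &nvdimm->dwork, 0);
	}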
@@ -511,12 +528,26 @@
 {
 	if (!dev)
 		return;
+
+	/*
+	 * Ensure that region devices always have their NUMA node set as
+	 * early as possible. This way we are able to make certain that
+	 * any memory associated with the creation and the creation
+	 * itself of the region is associated with the correct node.
+	 */
+	if (is_nd_region(dev))
+		set_dev_node(dev, to_nd_region(dev)->numa_node);
+
 	dev->bus = &nvdimm_bus_type;
-	if (dev->parent)
+	if (dev->parent) {
 		get_device(dev->parent);
+		if (dev_to_node(dev) == NUMA_NO_NODE)
+			set_dev_node(dev, dev_to_node(dev->parent));
+	}
 	get_device(dev);
-	async_schedule_domain(nd_async_device_register, dev,
-			&nd_async_domain);
+
+	async_schedule_dev_domain(nd_async_device_register, dev,
+				  &nd_async_domain);
 }

 void nd_device_register(struct device *dev)
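Propagating the parent's node when dev_to_node(dev) is still NUMA_NO_NODE, together with switching to async_schedule_dev_domain() (which runs the async registration near the device's node), means node-aware allocations made during registration and probe land on the memory backing the region. A minimal sketch of how a hypothetical child driver benefits (example_probe() and struct example_ctx are made up for illustration):

	struct example_ctx { int id; };

	/* Hypothetical probe: allocations follow the device's NUMA node. */
	static int example_probe(struct device *dev)
	{
		struct example_ctx *ctx;

		/* dev_to_node(dev) was inherited from the region/parent above */
		ctx = kzalloc_node(sizeof(*ctx), GFP_KERNEL, dev_to_node(dev));
		if (!ctx)
			return -ENOMEM;
		dev_set_drvdata(dev, ctx);
		return 0;
	}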
@@ -553,9 +584,9 @@
 		 * or otherwise let the async path handle it if the
 		 * unregistration was already queued.
 		 */
-		device_lock(dev);
+		nd_device_lock(dev);
 		killed = kill_device(dev);
-		device_unlock(dev);
+		nd_device_unlock(dev);

 		if (!killed)
 			return;
@@ -579,7 +610,7 @@
 	struct device_driver *drv = &nd_drv->drv;

 	if (!nd_drv->type) {
-		pr_debug("driver type bitmask not set (%pf)\n",
+		pr_debug("driver type bitmask not set (%ps)\n",
 				__builtin_return_address(0));
 		return -EINVAL;
 	}
@@ -597,7 +628,7 @@
 }
 EXPORT_SYMBOL(__nd_driver_register);

-int nvdimm_revalidate_disk(struct gendisk *disk)
+void nvdimm_check_and_set_ro(struct gendisk *disk)
 {
 	struct device *dev = disk_to_dev(disk)->parent;
 	struct nd_region *nd_region = to_nd_region(dev->parent);
@@ -608,16 +639,13 @@
 	 * read-only if the disk is already read-only.
 	 */
 	if (disk_ro || nd_region->ro == disk_ro)
-		return 0;
+		return;

 	dev_info(dev, "%s read-only, marking %s read-only\n",
 			dev_name(&nd_region->dev), disk->disk_name);
 	set_disk_ro(disk, 1);
-
-	return 0;
-
 }
-EXPORT_SYMBOL(nvdimm_revalidate_disk);
+EXPORT_SYMBOL(nvdimm_check_and_set_ro);

 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
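With the rename the helper no longer masquerades as a revalidate_disk() hook: it returns void and is intended to be called directly by the nd block drivers once the gendisk exists. A hedged usage sketch; the surrounding attach path is hypothetical, not code from this diff:

	/* Hypothetical attach path in an nd block driver. */
	device_add_disk(dev, disk, NULL);
	nvdimm_check_and_set_ro(disk);	/* mirror the region's ro policy onto the disk */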
@@ -643,10 +671,9 @@
 /*
  * nd_device_attribute_group - generic attributes for all devices on an nd bus
  */
-struct attribute_group nd_device_attribute_group = {
+const struct attribute_group nd_device_attribute_group = {
 	.attrs = nd_device_attributes,
 };
-EXPORT_SYMBOL_GPL(nd_device_attribute_group);

 static ssize_t numa_node_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -655,15 +682,44 @@
 }
 static DEVICE_ATTR_RO(numa_node);

+static int nvdimm_dev_to_target_node(struct device *dev)
+{
+	struct device *parent = dev->parent;
+	struct nd_region *nd_region = NULL;
+
+	if (is_nd_region(dev))
+		nd_region = to_nd_region(dev);
+	else if (parent && is_nd_region(parent))
+		nd_region = to_nd_region(parent);
+
+	if (!nd_region)
+		return NUMA_NO_NODE;
+	return nd_region->target_node;
+}
+
+static ssize_t target_node_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", nvdimm_dev_to_target_node(dev));
+}
+static DEVICE_ATTR_RO(target_node);
+
 static struct attribute *nd_numa_attributes[] = {
 	&dev_attr_numa_node.attr,
+	&dev_attr_target_node.attr,
 	NULL,
 };

 static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
 		int n)
 {
+	struct device *dev = container_of(kobj, typeof(*dev), kobj);
+
 	if (!IS_ENABLED(CONFIG_NUMA))
+		return 0;
+
+	if (a == &dev_attr_target_node.attr &&
+			nvdimm_dev_to_target_node(dev) == NUMA_NO_NODE)
 		return 0;

 	return a->mode;
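The new target_node attribute sits next to numa_node and reports the region's target_node (roughly, the node the persistent memory would belong to if onlined as system RAM), while the is_visible hook hides it for devices without a region context or when the value is NUMA_NO_NODE. From userspace it reads like any other sysfs attribute; a hypothetical example (the region0 device name is illustrative):

	/* Hypothetical userspace read of the new sysfs attribute. */
	#include <stdio.h>

	int main(void)
	{
		char buf[16] = "";
		FILE *f = fopen("/sys/bus/nd/devices/region0/target_node", "r");

		if (!f)
			return 1;
		fgets(buf, sizeof(buf), f);
		printf("target_node: %s", buf);
		fclose(f);
		return 0;
	}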
@@ -672,11 +728,10 @@
 /*
  * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
  */
-struct attribute_group nd_numa_attribute_group = {
+const struct attribute_group nd_numa_attribute_group = {
 	.attrs = nd_numa_attributes,
 	.is_visible = nd_numa_attr_visible,
 };
-EXPORT_SYMBOL_GPL(nd_numa_attribute_group);

 int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
 {
@@ -866,10 +921,10 @@
 		if (nvdimm_bus->probe_active == 0)
 			break;
 		nvdimm_bus_unlock(dev);
-		device_unlock(dev);
+		nd_device_unlock(dev);
 		wait_event(nvdimm_bus->wait,
 				nvdimm_bus->probe_active == 0);
-		device_lock(dev);
+		nd_device_lock(dev);
 		nvdimm_bus_lock(dev);
 	} while (true);
 }
@@ -928,7 +983,7 @@

 	/* ask the bus provider if it would like to block this request */
 	if (nd_desc->clear_to_send) {
-		int rc = nd_desc->clear_to_send(nd_desc, nvdimm, cmd);
+		int rc = nd_desc->clear_to_send(nd_desc, nvdimm, cmd, data);

 		if (rc)
 			return rc;
@@ -979,9 +1034,25 @@
 		dimm_name = "bus";
 	}

+	/* Validate command family support against bus declared support */
 	if (cmd == ND_CMD_CALL) {
+		unsigned long *mask;
+
 		if (copy_from_user(&pkg, p, sizeof(pkg)))
 			return -EFAULT;
+
+		if (nvdimm) {
+			if (pkg.nd_family > NVDIMM_FAMILY_MAX)
+				return -EINVAL;
+			mask = &nd_desc->dimm_family_mask;
+		} else {
+			if (pkg.nd_family > NVDIMM_BUS_FAMILY_MAX)
+				return -EINVAL;
+			mask = &nd_desc->bus_family_mask;
+		}
+
+		if (!test_bit(pkg.nd_family, mask))
+			return -EINVAL;
 	}

 	if (!desc ||
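With this check an ND_CMD_CALL passthrough is rejected up front unless its nd_family is one the bus provider declared: dimm_family_mask when the ioctl targets a DIMM, bus_family_mask when it targets the bus itself. A hedged sketch of the userspace side, assuming the uapi struct nd_cmd_pkg and ND_IOCTL_CALL from linux/ndctl.h; the function name and command number are made up and error handling is trimmed:

	/* Hypothetical passthrough call; nd_family must be in the declared mask. */
	#include <linux/ndctl.h>
	#include <sys/ioctl.h>
	#include <string.h>

	static int example_call(int dimm_fd)
	{
		struct nd_cmd_pkg pkg;

		memset(&pkg, 0, sizeof(pkg));
		pkg.nd_family = NVDIMM_FAMILY_INTEL;	/* <= NVDIMM_FAMILY_MAX and set
							   in dimm_family_mask */
		pkg.nd_command = 1;			/* vendor-specific function */
		pkg.nd_size_in = 0;
		pkg.nd_size_out = 0;
		return ioctl(dimm_fd, ND_IOCTL_CALL, &pkg);
	}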
@@ -1087,7 +1158,7 @@
 		goto out;
 	}

-	device_lock(dev);
+	nd_device_lock(dev);
 	nvdimm_bus_lock(dev);
 	rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
 	if (rc)
@@ -1109,7 +1180,7 @@

 out_unlock:
 	nvdimm_bus_unlock(dev);
-	device_unlock(dev);
+	nd_device_unlock(dev);
 out:
 	kfree(in_env);
 	kfree(out_env);
@@ -1203,7 +1274,7 @@
 	.owner = THIS_MODULE,
 	.open = nd_open,
 	.unlocked_ioctl = bus_ioctl,
-	.compat_ioctl = bus_ioctl,
+	.compat_ioctl = compat_ptr_ioctl,
 	.llseek = noop_llseek,
 };
12091280
....@@ -1211,7 +1282,7 @@
12111282 .owner = THIS_MODULE,
12121283 .open = nd_open,
12131284 .unlocked_ioctl = dimm_ioctl,
1214
- .compat_ioctl = dimm_ioctl,
1285
+ .compat_ioctl = compat_ptr_ioctl,
12151286 .llseek = noop_llseek,
12161287 };
12171288