2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/nvdimm/region_devs.c
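(Cumulative diff. It appears to fold together several upstream nvdimm changes: the SPDX license-header conversion; memregion_alloc()/memregion_free() replacing the file-private region_ida; the nd_device_lock() conversion for region sysfs attributes; an -EBUSY bail-out in region activation while NDD_SECURITY_OVERWRITE is pending; NDD_ALIASING replaced by NDD_LABELING in nd_region_to_nstype(); a new writable region 'align' attribute backed by region_size() and default_align(); DEVICE_ATTR_ADMIN_RO for 'resource'; device_type .groups replacing the exported attribute groups; nd_region_advance_seeds() replacing nd_region_notify_driver_action(); NDD_NOBLK enforcement; struct_size() for the region allocation; target_node propagation; pmem_wmb() in nvdimm_flush(); explicit-flush awareness in nvdimm_has_flush(); and the new is_nvdimm_sync() helper.)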
@@ -1,16 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
  */
 #include <linux/scatterlist.h>
+#include <linux/memregion.h>
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -27,7 +20,6 @@
  */
 #include <linux/io-64-nonatomic-hi-lo.h>
 
-static DEFINE_IDA(region_ida);
 static DEFINE_PER_CPU(int, flush_idx);
 
 static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
@@ -78,6 +70,11 @@
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
 		struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+		if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
+			nvdimm_bus_unlock(&nd_region->dev);
+			return -EBUSY;
+		}
 
 		/* at least one null hint slot per-dimm for the "no-hint" case */
 		flush_data_size += sizeof(void *);
@@ -136,41 +133,11 @@
 		put_device(&nvdimm->dev);
 	}
 	free_percpu(nd_region->lane);
-	ida_simple_remove(&region_ida, nd_region->id);
+	memregion_free(nd_region->id);
 	if (is_nd_blk(dev))
 		kfree(to_nd_blk_region(dev));
 	else
 		kfree(nd_region);
-}
-
-static struct device_type nd_blk_device_type = {
-	.name = "nd_blk",
-	.release = nd_region_release,
-};
-
-static struct device_type nd_pmem_device_type = {
-	.name = "nd_pmem",
-	.release = nd_region_release,
-};
-
-static struct device_type nd_volatile_device_type = {
-	.name = "nd_volatile",
-	.release = nd_region_release,
-};
-
-bool is_nd_pmem(struct device *dev)
-{
-	return dev ? dev->type == &nd_pmem_device_type : false;
-}
-
-bool is_nd_blk(struct device *dev)
-{
-	return dev ? dev->type == &nd_blk_device_type : false;
-}
-
-bool is_nd_volatile(struct device *dev)
-{
-	return dev ? dev->type == &nd_volatile_device_type : false;
 }
 
 struct nd_region *to_nd_region(struct device *dev)
@@ -228,16 +195,16 @@
 int nd_region_to_nstype(struct nd_region *nd_region)
 {
 	if (is_memory(&nd_region->dev)) {
-		u16 i, alias;
+		u16 i, label;
 
-		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
+		for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) {
 			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
 			struct nvdimm *nvdimm = nd_mapping->nvdimm;
 
-			if (test_bit(NDD_ALIASING, &nvdimm->flags))
-				alias++;
+			if (test_bit(NDD_LABELING, &nvdimm->flags))
+				label++;
 		}
-		if (alias)
+		if (label)
 			return ND_DEVICE_NAMESPACE_PMEM;
 		else
 			return ND_DEVICE_NAMESPACE_IO;
@@ -249,21 +216,25 @@
 }
 EXPORT_SYMBOL(nd_region_to_nstype);
 
+static unsigned long long region_size(struct nd_region *nd_region)
+{
+	if (is_memory(&nd_region->dev)) {
+		return nd_region->ndr_size;
+	} else if (nd_region->ndr_mappings == 1) {
+		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+
+		return nd_mapping->size;
+	}
+
+	return 0;
+}
+
 static ssize_t size_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct nd_region *nd_region = to_nd_region(dev);
-	unsigned long long size = 0;
 
-	if (is_memory(dev)) {
-		size = nd_region->ndr_size;
-	} else if (nd_region->ndr_mappings == 1) {
-		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
-
-		size = nd_mapping->size;
-	}
-
-	return sprintf(buf, "%llu\n", size);
+	return sprintf(buf, "%llu\n", region_size(nd_region));
 }
 static DEVICE_ATTR_RO(size);
 
@@ -334,7 +305,7 @@
 	 * the v1.1 namespace label cookie definition. To read all this
 	 * data we need to wait for probing to settle.
 	 */
-	device_lock(dev);
+	nd_device_lock(dev);
 	nvdimm_bus_lock(dev);
 	wait_nvdimm_bus_probe_idle(dev);
 	if (nd_region->ndr_mappings) {
@@ -351,7 +322,7 @@
 		}
 	}
 	nvdimm_bus_unlock(dev);
-	device_unlock(dev);
+	nd_device_unlock(dev);
 
 	if (rc)
 		return rc;
@@ -427,12 +398,12 @@
 	 * memory nvdimm_bus_lock() is dropped, but that's userspace's
 	 * problem to not race itself.
 	 */
-	device_lock(dev);
+	nd_device_lock(dev);
 	nvdimm_bus_lock(dev);
 	wait_nvdimm_bus_probe_idle(dev);
 	available = nd_region_available_dpa(nd_region);
 	nvdimm_bus_unlock(dev);
-	device_unlock(dev);
+	nd_device_unlock(dev);
 
 	return sprintf(buf, "%llu\n", available);
 }
@@ -444,12 +415,12 @@
 	struct nd_region *nd_region = to_nd_region(dev);
 	unsigned long long available = 0;
 
-	device_lock(dev);
+	nd_device_lock(dev);
 	nvdimm_bus_lock(dev);
 	wait_nvdimm_bus_probe_idle(dev);
 	available = nd_region_allocatable_dpa(nd_region);
 	nvdimm_bus_unlock(dev);
-	device_unlock(dev);
+	nd_device_unlock(dev);
 
 	return sprintf(buf, "%llu\n", available);
 }
@@ -562,18 +533,66 @@
 }
 static DEVICE_ATTR_RW(read_only);
 
+static ssize_t align_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_region *nd_region = to_nd_region(dev);
+
+	return sprintf(buf, "%#lx\n", nd_region->align);
+}
+
+static ssize_t align_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct nd_region *nd_region = to_nd_region(dev);
+	unsigned long val, dpa;
+	u32 remainder;
+	int rc;
+
+	rc = kstrtoul(buf, 0, &val);
+	if (rc)
+		return rc;
+
+	if (!nd_region->ndr_mappings)
+		return -ENXIO;
+
+	/*
+	 * Ensure space-align is evenly divisible by the region
+	 * interleave-width because the kernel typically has no facility
+	 * to determine which DIMM(s), dimm-physical-addresses, would
+	 * contribute to the tail capacity in system-physical-address
+	 * space for the namespace.
+	 */
+	dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder);
+	if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
+			|| val > region_size(nd_region) || remainder)
+		return -EINVAL;
+
+	/*
+	 * Given that space allocation consults this value multiple
+	 * times ensure it does not change for the duration of the
+	 * allocation.
+	 */
+	nvdimm_bus_lock(dev);
+	nd_region->align = val;
+	nvdimm_bus_unlock(dev);
+
+	return len;
+}
+static DEVICE_ATTR_RW(align);
+
 static ssize_t region_badblocks_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct nd_region *nd_region = to_nd_region(dev);
 	ssize_t rc;
 
-	device_lock(dev);
+	nd_device_lock(dev);
 	if (dev->driver)
 		rc = badblocks_show(&nd_region->bb, buf, 0);
 	else
 		rc = -ENXIO;
-	device_unlock(dev);
+	nd_device_unlock(dev);
 
 	return rc;
 }
@@ -586,7 +605,7 @@
 
 	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
 }
-static DEVICE_ATTR_RO(resource);
+static DEVICE_ATTR_ADMIN_RO(resource);
 
 static ssize_t persistence_domain_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -604,6 +623,7 @@
 
 static struct attribute *nd_region_attributes[] = {
 	&dev_attr_size.attr,
+	&dev_attr_align.attr,
 	&dev_attr_nstype.attr,
 	&dev_attr_mappings.attr,
 	&dev_attr_btt_seed.attr,
@@ -638,12 +658,8 @@
 	if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
 		return 0;
 
-	if (a == &dev_attr_resource.attr) {
-		if (is_memory(dev))
-			return 0400;
-		else
-			return 0;
-	}
+	if (a == &dev_attr_resource.attr && !is_memory(dev))
+		return 0;
 
 	if (a == &dev_attr_deep_flush.attr) {
 		int has_flush = nvdimm_has_flush(nd_region);
@@ -663,6 +679,9 @@
 		return a->mode;
 	}
 
+	if (a == &dev_attr_align.attr)
+		return a->mode;
+
 	if (a != &dev_attr_set_cookie.attr
 			&& a != &dev_attr_available_size.attr)
 		return a->mode;
@@ -675,128 +694,6 @@
 		return a->mode;
 
 	return 0;
-}
-
-struct attribute_group nd_region_attribute_group = {
-	.attrs = nd_region_attributes,
-	.is_visible = region_visible,
-};
-EXPORT_SYMBOL_GPL(nd_region_attribute_group);
-
-u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
-		struct nd_namespace_index *nsindex)
-{
-	struct nd_interleave_set *nd_set = nd_region->nd_set;
-
-	if (!nd_set)
-		return 0;
-
-	if (nsindex && __le16_to_cpu(nsindex->major) == 1
-			&& __le16_to_cpu(nsindex->minor) == 1)
-		return nd_set->cookie1;
-	return nd_set->cookie2;
-}
-
-u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
-{
-	struct nd_interleave_set *nd_set = nd_region->nd_set;
-
-	if (nd_set)
-		return nd_set->altcookie;
-	return 0;
-}
-
-void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
-{
-	struct nd_label_ent *label_ent, *e;
-
-	lockdep_assert_held(&nd_mapping->lock);
-	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
-		list_del(&label_ent->list);
-		kfree(label_ent);
-	}
-}
-
-/*
- * Upon successful probe/remove, take/release a reference on the
- * associated interleave set (if present), and plant new btt + namespace
- * seeds. Also, on the removal of a BLK region, notify the provider to
- * disable the region.
- */
-static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
-		struct device *dev, bool probe)
-{
-	struct nd_region *nd_region;
-
-	if (!probe && is_nd_region(dev)) {
-		int i;
-
-		nd_region = to_nd_region(dev);
-		for (i = 0; i < nd_region->ndr_mappings; i++) {
-			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
-			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
-			struct nvdimm *nvdimm = nd_mapping->nvdimm;
-
-			mutex_lock(&nd_mapping->lock);
-			nd_mapping_free_labels(nd_mapping);
-			mutex_unlock(&nd_mapping->lock);
-
-			put_ndd(ndd);
-			nd_mapping->ndd = NULL;
-			if (ndd)
-				atomic_dec(&nvdimm->busy);
-		}
-	}
-	if (dev->parent && is_nd_region(dev->parent) && probe) {
-		nd_region = to_nd_region(dev->parent);
-		nvdimm_bus_lock(dev);
-		if (nd_region->ns_seed == dev)
-			nd_region_create_ns_seed(nd_region);
-		nvdimm_bus_unlock(dev);
-	}
-	if (is_nd_btt(dev) && probe) {
-		struct nd_btt *nd_btt = to_nd_btt(dev);
-
-		nd_region = to_nd_region(dev->parent);
-		nvdimm_bus_lock(dev);
-		if (nd_region->btt_seed == dev)
-			nd_region_create_btt_seed(nd_region);
-		if (nd_region->ns_seed == &nd_btt->ndns->dev)
-			nd_region_create_ns_seed(nd_region);
-		nvdimm_bus_unlock(dev);
-	}
-	if (is_nd_pfn(dev) && probe) {
-		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
-
-		nd_region = to_nd_region(dev->parent);
-		nvdimm_bus_lock(dev);
-		if (nd_region->pfn_seed == dev)
-			nd_region_create_pfn_seed(nd_region);
-		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
-			nd_region_create_ns_seed(nd_region);
-		nvdimm_bus_unlock(dev);
-	}
-	if (is_nd_dax(dev) && probe) {
-		struct nd_dax *nd_dax = to_nd_dax(dev);
-
-		nd_region = to_nd_region(dev->parent);
-		nvdimm_bus_lock(dev);
-		if (nd_region->dax_seed == dev)
-			nd_region_create_dax_seed(nd_region);
-		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
-			nd_region_create_ns_seed(nd_region);
-		nvdimm_bus_unlock(dev);
-	}
-}
-
-void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
-{
-	nd_region_notify_driver_action(nvdimm_bus, dev, true);
-}
-
-void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
-{
-	nd_region_notify_driver_action(nvdimm_bus, dev, false);
 }
 
 static ssize_t mappingN(struct device *dev, char *buf, int n)
@@ -906,11 +803,124 @@
 	NULL,
 };
 
-struct attribute_group nd_mapping_attribute_group = {
+static const struct attribute_group nd_mapping_attribute_group = {
 	.is_visible = mapping_visible,
 	.attrs = mapping_attributes,
 };
-EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
+
+static const struct attribute_group nd_region_attribute_group = {
+	.attrs = nd_region_attributes,
+	.is_visible = region_visible,
+};
+
+static const struct attribute_group *nd_region_attribute_groups[] = {
+	&nd_device_attribute_group,
+	&nd_region_attribute_group,
+	&nd_numa_attribute_group,
+	&nd_mapping_attribute_group,
+	NULL,
+};
+
+static const struct device_type nd_blk_device_type = {
+	.name = "nd_blk",
+	.release = nd_region_release,
+	.groups = nd_region_attribute_groups,
+};
+
+static const struct device_type nd_pmem_device_type = {
+	.name = "nd_pmem",
+	.release = nd_region_release,
+	.groups = nd_region_attribute_groups,
+};
+
+static const struct device_type nd_volatile_device_type = {
+	.name = "nd_volatile",
+	.release = nd_region_release,
+	.groups = nd_region_attribute_groups,
+};
+
+bool is_nd_pmem(struct device *dev)
+{
+	return dev ? dev->type == &nd_pmem_device_type : false;
+}
+
+bool is_nd_blk(struct device *dev)
+{
+	return dev ? dev->type == &nd_blk_device_type : false;
+}
+
+bool is_nd_volatile(struct device *dev)
+{
+	return dev ? dev->type == &nd_volatile_device_type : false;
+}
+
+u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
+		struct nd_namespace_index *nsindex)
+{
+	struct nd_interleave_set *nd_set = nd_region->nd_set;
+
+	if (!nd_set)
+		return 0;
+
+	if (nsindex && __le16_to_cpu(nsindex->major) == 1
+			&& __le16_to_cpu(nsindex->minor) == 1)
+		return nd_set->cookie1;
+	return nd_set->cookie2;
+}
+
+u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
+{
+	struct nd_interleave_set *nd_set = nd_region->nd_set;
+
+	if (nd_set)
+		return nd_set->altcookie;
+	return 0;
+}
+
+void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
+{
+	struct nd_label_ent *label_ent, *e;
+
+	lockdep_assert_held(&nd_mapping->lock);
+	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+		list_del(&label_ent->list);
+		kfree(label_ent);
+	}
+}
+
+/*
+ * When a namespace is activated create new seeds for the next
+ * namespace, or namespace-personality to be configured.
+ */
+void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
+{
+	nvdimm_bus_lock(dev);
+	if (nd_region->ns_seed == dev) {
+		nd_region_create_ns_seed(nd_region);
+	} else if (is_nd_btt(dev)) {
+		struct nd_btt *nd_btt = to_nd_btt(dev);
+
+		if (nd_region->btt_seed == dev)
+			nd_region_create_btt_seed(nd_region);
+		if (nd_region->ns_seed == &nd_btt->ndns->dev)
+			nd_region_create_ns_seed(nd_region);
+	} else if (is_nd_pfn(dev)) {
+		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+		if (nd_region->pfn_seed == dev)
+			nd_region_create_pfn_seed(nd_region);
+		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
+			nd_region_create_ns_seed(nd_region);
+	} else if (is_nd_dax(dev)) {
+		struct nd_dax *nd_dax = to_nd_dax(dev);
+
+		if (nd_region->dax_seed == dev)
+			nd_region_create_dax_seed(nd_region);
+		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
+			nd_region_create_ns_seed(nd_region);
+	}
+	nvdimm_bus_unlock(dev);
+}
 
 int nd_blk_region_init(struct nd_region *nd_region)
 {
@@ -981,9 +991,47 @@
 }
 EXPORT_SYMBOL(nd_region_release_lane);
 
+/*
+ * PowerPC requires this alignment for memremap_pages(). All other archs
+ * should be ok with SUBSECTION_SIZE (see memremap_compat_align()).
+ */
+#define MEMREMAP_COMPAT_ALIGN_MAX SZ_16M
+
+static unsigned long default_align(struct nd_region *nd_region)
+{
+	unsigned long align;
+	int i, mappings;
+	u32 remainder;
+
+	if (is_nd_blk(&nd_region->dev))
+		align = PAGE_SIZE;
+	else
+		align = MEMREMAP_COMPAT_ALIGN_MAX;
+
+	for (i = 0; i < nd_region->ndr_mappings; i++) {
+		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+		struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+		if (test_bit(NDD_ALIASING, &nvdimm->flags)) {
+			align = MEMREMAP_COMPAT_ALIGN_MAX;
+			break;
+		}
+	}
+
+	if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX)
+		align = PAGE_SIZE;
+
+	mappings = max_t(u16, 1, nd_region->ndr_mappings);
+	div_u64_rem(align, mappings, &remainder);
+	if (remainder)
+		align *= mappings;
+
+	return align;
+}
+
 static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
-		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
-		const char *caller)
+		struct nd_region_desc *ndr_desc,
+		const struct device_type *dev_type, const char *caller)
 {
 	struct nd_region *nd_region;
 	struct device *dev;
@@ -995,15 +1043,22 @@
 		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
 		struct nvdimm *nvdimm = mapping->nvdimm;
 
-		if ((mapping->start | mapping->size) % SZ_4K) {
-			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
-					caller, dev_name(&nvdimm->dev), i);
-
+		if ((mapping->start | mapping->size) % PAGE_SIZE) {
+			dev_err(&nvdimm_bus->dev,
+				"%s: %s mapping%d is not %ld aligned\n",
+				caller, dev_name(&nvdimm->dev), i, PAGE_SIZE);
 			return NULL;
 		}
 
 		if (test_bit(NDD_UNARMED, &nvdimm->flags))
 			ro = 1;
+
+		if (test_bit(NDD_NOBLK, &nvdimm->flags)
+				&& dev_type == &nd_blk_device_type) {
+			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
+					caller, dev_name(&nvdimm->dev), i);
+			return NULL;
+		}
 	}
 
 	if (dev_type == &nd_blk_device_type) {
@@ -1021,16 +1076,15 @@
 		}
 		region_buf = ndbr;
 	} else {
-		nd_region = kzalloc(sizeof(struct nd_region)
-				+ sizeof(struct nd_mapping)
-				* ndr_desc->num_mappings,
-				GFP_KERNEL);
+		nd_region = kzalloc(struct_size(nd_region, mapping,
+						ndr_desc->num_mappings),
+				    GFP_KERNEL);
 		region_buf = nd_region;
 	}
 
 	if (!region_buf)
 		return NULL;
-	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
+	nd_region->id = memregion_alloc(GFP_KERNEL);
 	if (nd_region->id < 0)
 		goto err_id;
 
@@ -1066,6 +1120,7 @@
 	nd_region->flags = ndr_desc->flags;
 	nd_region->ro = ro;
 	nd_region->numa_node = ndr_desc->numa_node;
+	nd_region->target_node = ndr_desc->target_node;
 	ida_init(&nd_region->ns_ida);
 	ida_init(&nd_region->btt_ida);
 	ida_init(&nd_region->pfn_ida);
@@ -1078,6 +1133,7 @@
 	dev->of_node = ndr_desc->of_node;
 	nd_region->ndr_size = resource_size(ndr_desc->res);
 	nd_region->ndr_start = ndr_desc->res->start;
+	nd_region->align = default_align(nd_region);
 	if (ndr_desc->flush)
 		nd_region->flush = ndr_desc->flush;
 	else
@@ -1088,7 +1144,7 @@
 	return nd_region;
 
  err_percpu:
-	ida_simple_remove(&region_ida, nd_region->id);
+	memregion_free(nd_region->id);
  err_id:
 	kfree(region_buf);
 	return NULL;
@@ -1153,13 +1209,13 @@
 	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));
 
 	/*
-	 * The first wmb() is needed to 'sfence' all previous writes
-	 * such that they are architecturally visible for the platform
-	 * buffer flush. Note that we've already arranged for pmem
+	 * The pmem_wmb() is needed to 'sfence' all
+	 * previous writes such that they are architecturally visible for
+	 * the platform buffer flush. Note that we've already arranged for pmem
 	 * writes to avoid the cache via memcpy_flushcache(). The final
 	 * wmb() ensures ordering for the NVDIMM flush write.
 	 */
-	wmb();
+	pmem_wmb();
 	for (i = 0; i < nd_region->ndr_mappings; i++)
 		if (ndrd_get_flush_wpq(ndrd, i, 0))
 			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
@@ -1186,6 +1242,11 @@
 			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
 		return -ENXIO;
 
+	/* Test if an explicit flush function is defined */
+	if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
+		return 1;
+
+	/* Test if any flush hints for the region are available */
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
 		struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -1196,8 +1257,8 @@
 	}
 
 	/*
-	 * The platform defines dimm devices without hints, assume
-	 * platform persistence mechanism like ADR
+	 * The platform defines dimm devices without hints nor explicit flush,
+	 * assume platform persistence mechanism like ADR
 	 */
 	return 0;
 }
@@ -1209,6 +1270,16 @@
 		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
 }
 EXPORT_SYMBOL_GPL(nvdimm_has_cache);
+
+bool is_nvdimm_sync(struct nd_region *nd_region)
+{
+	if (is_nd_volatile(&nd_region->dev))
+		return true;
+
+	return is_nd_pmem(&nd_region->dev) &&
+		!test_bit(ND_REGION_ASYNC, &nd_region->flags);
+}
+EXPORT_SYMBOL_GPL(is_nvdimm_sync);
 
 struct conflict_context {
 	struct nd_region *nd_region;
@@ -1249,9 +1320,4 @@
 	};
 
 	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
-}
-
-void __exit nd_region_devs_exit(void)
-{
-	ida_destroy(&region_ida);
 }
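
Note: the divisibility rule that the new align_store() enforces is easy to sanity-check outside the kernel. Below is a minimal standalone C sketch of that validation, assuming a 4K page; is_power_of_2(), align_is_valid(), and the sample region geometry are illustrative stand-ins, not the kernel implementation:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* stand-in for the kernel's PAGE_SIZE */

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/*
 * Mirror of align_store()'s check: the per-DIMM share of the proposed
 * alignment (val / interleave-ways) must divide evenly, be a power of
 * two, be at least a page, and val must fit inside the region.
 */
static bool align_is_valid(unsigned long val, unsigned long mappings,
		unsigned long region_size)
{
	unsigned long dpa = val / mappings;
	unsigned long remainder = val % mappings;

	return is_power_of_2(dpa) && dpa >= PAGE_SIZE &&
		val <= region_size && !remainder;
}

int main(void)
{
	unsigned long region = 1UL << 30;	/* hypothetical 1G, 2-way region */

	printf("32M: %d\n", align_is_valid(32UL << 20, 2, region));	/* 1 */
	printf("24M: %d\n", align_is_valid(24UL << 20, 2, region));	/* 0 */
	return 0;
}

With a 2-way interleave, 32M passes (16M per DIMM, a power of two) while 24M fails (12M per DIMM is not a power of two), which is why the kernel divides the proposed value by the interleave width before testing it.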