2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/nvdimm/dimm_devs.c
@@ -1,16 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
 #include <linux/device.h>
 #include <linux/ndctl.h>
@@ -25,6 +18,10 @@
 
 static DEFINE_IDA(dimm_ida);
 
+static bool noblk;
+module_param(noblk, bool, 0444);
+MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");
+
 /*
  * Retrieve bus and dimm handle and return if this bus supports
  * get_config_data commands
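
[Annotation, not part of the patch: dimm_devs.c is built into the libnvdimm module, so assuming the usual module build the new flag is set at load time ("modprobe libnvdimm noblk=1") or on the kernel command line ("libnvdimm.noblk=1"); the 0444 mode exposes it read-only under /sys/module/libnvdimm/parameters/noblk. Its consumer appears later in this diff, where __nvdimm_create() ORs NDD_NOBLK into every DIMM's flags.]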
@@ -35,7 +32,7 @@
 
        if (!nvdimm->cmd_mask ||
            !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
-               if (test_bit(NDD_ALIASING, &nvdimm->flags))
+               if (test_bit(NDD_LABELING, &nvdimm->flags))
                        return -ENXIO;
                else
                        return -ENOTTY;
@@ -53,7 +50,7 @@
 
        rc = nvdimm_check_config_data(ndd->dev);
        if (rc)
-               dev_dbg(ndd->dev, "%pf: %s error: %d\n",
+               dev_dbg(ndd->dev, "%ps: %s error: %d\n",
                        __builtin_return_address(0), __func__, rc);
        return rc;
 }
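
[Annotation, not part of the patch: %pf was the deprecated variant of the symbolic-address printk format and was removed from vsprintf; %ps is its replacement and prints the symbol name for a plain address such as __builtin_return_address(0), so the debug output is effectively unchanged.]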
@@ -85,56 +82,48 @@
        return cmd_rc;
 }
 
-int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
+int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
+                          size_t offset, size_t len)
 {
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+       struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        int rc = validate_dimm(ndd), cmd_rc = 0;
        struct nd_cmd_get_config_data_hdr *cmd;
-       struct nvdimm_bus_descriptor *nd_desc;
-       u32 max_cmd_size, config_size;
-       size_t offset;
+       size_t max_cmd_size, buf_offset;
 
        if (rc)
                return rc;
 
-       if (ndd->data)
-               return 0;
-
-       if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
-                       || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
-               dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
-                               ndd->nsarea.max_xfer, ndd->nsarea.config_size);
+       if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;
-       }
 
-       ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
-       if (!ndd->data)
-               return -ENOMEM;
-
-       max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
-       cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
+       max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
+       cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;
 
-       nd_desc = nvdimm_bus->nd_desc;
-       for (config_size = ndd->nsarea.config_size, offset = 0;
-                       config_size; config_size -= cmd->in_length,
-                       offset += cmd->in_length) {
-               cmd->in_length = min(config_size, max_cmd_size);
-               cmd->in_offset = offset;
+       for (buf_offset = 0; len;
+            len -= cmd->in_length, buf_offset += cmd->in_length) {
+               size_t cmd_size;
+
+               cmd->in_offset = offset + buf_offset;
+               cmd->in_length = min(max_cmd_size, len);
+
+               cmd_size = sizeof(*cmd) + cmd->in_length;
+
                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
-                               ND_CMD_GET_CONFIG_DATA, cmd,
-                               cmd->in_length + sizeof(*cmd), &cmd_rc);
+                               ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
                if (rc < 0)
                        break;
                if (cmd_rc < 0) {
                        rc = cmd_rc;
                        break;
                }
-               memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
+
+               /* out_buf should be valid, copy it into our output buffer */
+               memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
        }
-       dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
-       kfree(cmd);
+       kvfree(cmd);
 
        return rc;
 }
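
[Annotation, not part of the patch: the rewritten nvdimm_get_config_data() reads an arbitrary (offset, len) window of the label area in chunks no larger than the DIMM's max_xfer, advancing the device offset and the output-buffer offset by the previous transfer length. Below is a minimal user-space sketch of the same chunking pattern; read_chunk() and CHUNK_MAX are hypothetical stand-ins for the bus ->ndctl() call and ndd->nsarea.max_xfer, and the error codes are illustrative only.]

#include <stddef.h>

/* hypothetical back end standing in for nd_desc->ndctl(..., ND_CMD_GET_CONFIG_DATA, ...) */
extern int read_chunk(size_t dev_offset, size_t length, void *out);

#define CHUNK_MAX 4096 /* stand-in for ndd->nsarea.max_xfer */

static int read_window(void *buf, size_t offset, size_t len, size_t config_size)
{
        size_t buf_offset, xfer;
        int rc;

        /* reject windows that run past the config area, as the driver does */
        if (offset + len > config_size)
                return -1;

        for (buf_offset = 0; len; len -= xfer, buf_offset += xfer) {
                xfer = len < CHUNK_MAX ? len : CHUNK_MAX;
                /* device offset tracks the caller's base offset plus progress */
                rc = read_chunk(offset + buf_offset, xfer,
                                (char *)buf + buf_offset);
                if (rc < 0)
                        return rc;
        }
        return 0;
}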
@@ -151,15 +140,11 @@
        if (rc)
                return rc;
 
-       if (!ndd->data)
-               return -ENXIO;
-
        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;
 
-       max_cmd_size = min_t(u32, PAGE_SIZE, len);
-       max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
-       cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
+       max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
+       cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;
 
@@ -183,16 +168,16 @@
                        break;
                }
        }
-       kfree(cmd);
+       kvfree(cmd);
 
        return rc;
 }
 
-void nvdimm_set_aliasing(struct device *dev)
+void nvdimm_set_labeling(struct device *dev)
 {
        struct nvdimm *nvdimm = to_nvdimm(dev);
 
-       set_bit(NDD_ALIASING, &nvdimm->flags);
+       set_bit(NDD_LABELING, &nvdimm->flags);
 }
 
 void nvdimm_set_locked(struct device *dev)
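
[Annotation, not part of the patch: the NDD_ALIASING -> NDD_LABELING conversion in this and the earlier validate_dimm() hunk separates "this DIMM has a namespace label area" from "this DIMM aliases PMEM with BLK capacity"; the hard -ENXIO error is now raised only when label support is expected but the config-data commands are missing.]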
@@ -215,16 +200,6 @@
 
        ida_simple_remove(&dimm_ida, nvdimm->id);
        kfree(nvdimm);
-}
-
-static struct device_type nvdimm_device_type = {
-       .name = "nvdimm",
-       .release = nvdimm_release,
-};
-
-bool is_nvdimm(struct device *dev)
-{
-       return dev->type == &nvdimm_device_type;
 }
 
 struct nvdimm *to_nvdimm(struct device *dev)
@@ -337,8 +312,9 @@
 {
        struct nvdimm *nvdimm = to_nvdimm(dev);
 
-       return sprintf(buf, "%s%s\n",
+       return sprintf(buf, "%s%s%s\n",
                        test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
+                       test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
                        test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
 }
 static DEVICE_ATTR_RO(flags);
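
[Annotation, not part of the patch: each token is printed with a trailing space and absent flags collapse to the empty string, so a locked, label-capable, aliased DIMM would read back from /sys/bus/nd/devices/nmemX/flags as something like "alias label lock".]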
@@ -386,31 +362,241 @@
 {
        ssize_t rc;
 
-       device_lock(dev);
+       nd_device_lock(dev);
        rc = __available_slots_show(dev_get_drvdata(dev), buf);
-       device_unlock(dev);
+       nd_device_unlock(dev);
 
        return rc;
 }
 static DEVICE_ATTR_RO(available_slots);
+
+__weak ssize_t security_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct nvdimm *nvdimm = to_nvdimm(dev);
+
+       if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
+               return sprintf(buf, "overwrite\n");
+       if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
+               return sprintf(buf, "disabled\n");
+       if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
+               return sprintf(buf, "unlocked\n");
+       if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
+               return sprintf(buf, "locked\n");
+       return -ENOTTY;
+}
+
+static ssize_t frozen_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct nvdimm *nvdimm = to_nvdimm(dev);
+
+       return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
+                               &nvdimm->sec.flags));
+}
+static DEVICE_ATTR_RO(frozen);
+
+static ssize_t security_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t len)
+
+{
+       ssize_t rc;
+
+       /*
+        * Require all userspace triggered security management to be
+        * done while probing is idle and the DIMM is not in active use
+        * in any region.
+        */
+       nd_device_lock(dev);
+       nvdimm_bus_lock(dev);
+       wait_nvdimm_bus_probe_idle(dev);
+       rc = nvdimm_security_store(dev, buf, len);
+       nvdimm_bus_unlock(dev);
+       nd_device_unlock(dev);
+
+       return rc;
+}
+static DEVICE_ATTR_RW(security);
 
 static struct attribute *nvdimm_attributes[] = {
        &dev_attr_state.attr,
        &dev_attr_flags.attr,
        &dev_attr_commands.attr,
        &dev_attr_available_slots.attr,
+       &dev_attr_security.attr,
+       &dev_attr_frozen.attr,
        NULL,
 };
 
-struct attribute_group nvdimm_attribute_group = {
-       .attrs = nvdimm_attributes,
-};
-EXPORT_SYMBOL_GPL(nvdimm_attribute_group);
+static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+       struct device *dev = container_of(kobj, typeof(*dev), kobj);
+       struct nvdimm *nvdimm = to_nvdimm(dev);
 
-struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
-               const struct attribute_group **groups, unsigned long flags,
-               unsigned long cmd_mask, int num_flush,
-               struct resource *flush_wpq)
+       if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
+               return a->mode;
+       if (!nvdimm->sec.flags)
+               return 0;
+
+       if (a == &dev_attr_security.attr) {
+               /* Are there any state mutation ops (make writable)? */
+               if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
+                               || nvdimm->sec.ops->change_key
+                               || nvdimm->sec.ops->erase
+                               || nvdimm->sec.ops->overwrite)
+                       return a->mode;
+               return 0444;
+       }
+
+       if (nvdimm->sec.ops->freeze)
+               return a->mode;
+       return 0;
+}
+
+static const struct attribute_group nvdimm_attribute_group = {
+       .attrs = nvdimm_attributes,
+       .is_visible = nvdimm_visible,
+};
+
+static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct nvdimm *nvdimm = to_nvdimm(dev);
+       enum nvdimm_fwa_result result;
+
+       if (!nvdimm->fw_ops)
+               return -EOPNOTSUPP;
+
+       nvdimm_bus_lock(dev);
+       result = nvdimm->fw_ops->activate_result(nvdimm);
+       nvdimm_bus_unlock(dev);
+
+       switch (result) {
+       case NVDIMM_FWA_RESULT_NONE:
+               return sprintf(buf, "none\n");
+       case NVDIMM_FWA_RESULT_SUCCESS:
+               return sprintf(buf, "success\n");
+       case NVDIMM_FWA_RESULT_FAIL:
+               return sprintf(buf, "fail\n");
+       case NVDIMM_FWA_RESULT_NOTSTAGED:
+               return sprintf(buf, "not_staged\n");
+       case NVDIMM_FWA_RESULT_NEEDRESET:
+               return sprintf(buf, "need_reset\n");
+       default:
+               return -ENXIO;
+       }
+}
+static DEVICE_ATTR_ADMIN_RO(result);
+
+static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct nvdimm *nvdimm = to_nvdimm(dev);
+       enum nvdimm_fwa_state state;
+
+       if (!nvdimm->fw_ops)
+               return -EOPNOTSUPP;
+
+       nvdimm_bus_lock(dev);
+       state = nvdimm->fw_ops->activate_state(nvdimm);
+       nvdimm_bus_unlock(dev);
+
+       switch (state) {
+       case NVDIMM_FWA_IDLE:
+               return sprintf(buf, "idle\n");
+       case NVDIMM_FWA_BUSY:
+               return sprintf(buf, "busy\n");
+       case NVDIMM_FWA_ARMED:
+               return sprintf(buf, "armed\n");
+       default:
+               return -ENXIO;
+       }
+}
+
+static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t len)
+{
+       struct nvdimm *nvdimm = to_nvdimm(dev);
+       enum nvdimm_fwa_trigger arg;
+       int rc;
+
+       if (!nvdimm->fw_ops)
+               return -EOPNOTSUPP;
+
+       if (sysfs_streq(buf, "arm"))
+               arg = NVDIMM_FWA_ARM;
+       else if (sysfs_streq(buf, "disarm"))
+               arg = NVDIMM_FWA_DISARM;
+       else
+               return -EINVAL;
+
+       nvdimm_bus_lock(dev);
+       rc = nvdimm->fw_ops->arm(nvdimm, arg);
+       nvdimm_bus_unlock(dev);
+
+       if (rc < 0)
+               return rc;
+       return len;
+}
+static DEVICE_ATTR_ADMIN_RW(activate);
+
+static struct attribute *nvdimm_firmware_attributes[] = {
+       &dev_attr_activate.attr,
+       &dev_attr_result.attr,
+       NULL,
+};
+
+static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+       struct device *dev = container_of(kobj, typeof(*dev), kobj);
+       struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+       struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+       struct nvdimm *nvdimm = to_nvdimm(dev);
+       enum nvdimm_fwa_capability cap;
+
+       if (!nd_desc->fw_ops)
+               return 0;
+       if (!nvdimm->fw_ops)
+               return 0;
+
+       nvdimm_bus_lock(dev);
+       cap = nd_desc->fw_ops->capability(nd_desc);
+       nvdimm_bus_unlock(dev);
+
+       if (cap < NVDIMM_FWA_CAP_QUIESCE)
+               return 0;
+
+       return a->mode;
+}
+
+static const struct attribute_group nvdimm_firmware_attribute_group = {
+       .name = "firmware",
+       .attrs = nvdimm_firmware_attributes,
+       .is_visible = nvdimm_firmware_visible,
+};
+
+static const struct attribute_group *nvdimm_attribute_groups[] = {
+       &nd_device_attribute_group,
+       &nvdimm_attribute_group,
+       &nvdimm_firmware_attribute_group,
+       NULL,
+};
+
+static const struct device_type nvdimm_device_type = {
+       .name = "nvdimm",
+       .release = nvdimm_release,
+       .groups = nvdimm_attribute_groups,
+};
+
+bool is_nvdimm(struct device *dev)
+{
+       return dev->type == &nvdimm_device_type;
+}
+
+struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
+               void *provider_data, const struct attribute_group **groups,
+               unsigned long flags, unsigned long cmd_mask, int num_flush,
+               struct resource *flush_wpq, const char *dimm_id,
+               const struct nvdimm_security_ops *sec_ops,
+               const struct nvdimm_fw_ops *fw_ops)
 {
        struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
        struct device *dev;
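
[Annotation, not part of the patch: after this change each DIMM would expose its security state at /sys/bus/nd/devices/nmemX/security, a frozen indicator at nmemX/frozen, and, when the bus descriptor reports at least quiesce-capable activation, a firmware/ subdirectory with activate and result. DEVICE_ATTR_ADMIN_RO/_RW restrict the firmware files to root (0400/0600), and writing "arm" or "disarm" to firmware/activate stages or unstages the DIMM for activation.]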
@@ -423,7 +609,11 @@
                kfree(nvdimm);
                return NULL;
        }
+
+       nvdimm->dimm_id = dimm_id;
        nvdimm->provider_data = provider_data;
+       if (noblk)
+               flags |= 1 << NDD_NOBLK;
        nvdimm->flags = flags;
        nvdimm->cmd_mask = cmd_mask;
        nvdimm->num_flush = num_flush;
@@ -435,11 +625,88 @@
        dev->type = &nvdimm_device_type;
        dev->devt = MKDEV(nvdimm_major, nvdimm->id);
        dev->groups = groups;
+       nvdimm->sec.ops = sec_ops;
+       nvdimm->fw_ops = fw_ops;
+       nvdimm->sec.overwrite_tmo = 0;
+       INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
+       /*
+        * Security state must be initialized before device_add() for
+        * attribute visibility.
+        */
+       /* get security state and extended (master) state */
+       nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
+       nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
        nd_device_register(dev);
 
        return nvdimm;
 }
-EXPORT_SYMBOL_GPL(nvdimm_create);
+EXPORT_SYMBOL_GPL(__nvdimm_create);
+
+static void shutdown_security_notify(void *data)
+{
+       struct nvdimm *nvdimm = data;
+
+       sysfs_put(nvdimm->sec.overwrite_state);
+}
+
+int nvdimm_security_setup_events(struct device *dev)
+{
+       struct nvdimm *nvdimm = to_nvdimm(dev);
+
+       if (!nvdimm->sec.flags || !nvdimm->sec.ops
+                       || !nvdimm->sec.ops->overwrite)
+               return 0;
+       nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
+       if (!nvdimm->sec.overwrite_state)
+               return -ENOMEM;
+
+       return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
+}
+EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);
+
+int nvdimm_in_overwrite(struct nvdimm *nvdimm)
+{
+       return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
+}
+EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);
+
+int nvdimm_security_freeze(struct nvdimm *nvdimm)
+{
+       int rc;
+
+       WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));
+
+       if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
+               return -EOPNOTSUPP;
+
+       if (!nvdimm->sec.flags)
+               return -EIO;
+
+       if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
+               dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
+               return -EBUSY;
+       }
+
+       rc = nvdimm->sec.ops->freeze(nvdimm);
+       nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
+
+       return rc;
+}
+
+static unsigned long dpa_align(struct nd_region *nd_region)
+{
+       struct device *dev = &nd_region->dev;
+
+       if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
+                       "bus lock required for capacity provision\n"))
+               return 0;
+       if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
+                       % nd_region->ndr_mappings,
+                       "invalid region align %#lx mappings: %d\n",
+                       nd_region->align, nd_region->ndr_mappings))
+               return 0;
+       return nd_region->align / nd_region->ndr_mappings;
+}
 
 int alias_dpa_busy(struct device *dev, void *data)
 {
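
[Annotation, not part of the patch: dpa_align() converts the region-wide allocation alignment into a per-DIMM granularity. For example, a region with align = 16M interleaved over 4 DIMM mappings yields 16M / 4 = 4M units for the DPA accounting below, while a zero return flags a missing bus lock or a misconfigured region and makes every caller report zero capacity.]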
@@ -449,6 +716,7 @@
        struct nd_region *nd_region;
        struct nvdimm_drvdata *ndd;
        struct resource *res;
+       unsigned long align;
        int i;
 
        if (!is_memory(dev))
@@ -486,13 +754,21 @@
         * Find the free dpa from the end of the last pmem allocation to
         * the end of the interleave-set mapping.
         */
+       align = dpa_align(nd_region);
+       if (!align)
+               return 0;
+
        for_each_dpa_resource(ndd, res) {
+               resource_size_t start, end;
+
                if (strncmp(res->name, "pmem", 4) != 0)
                        continue;
-               if ((res->start >= blk_start && res->start < map_end)
-                               || (res->end >= blk_start
-                                       && res->end <= map_end)) {
-                       new = max(blk_start, min(map_end + 1, res->end + 1));
+
+               start = ALIGN_DOWN(res->start, align);
+               end = ALIGN(res->end + 1, align) - 1;
+               if ((start >= blk_start && start < map_end)
+                               || (end >= blk_start && end <= map_end)) {
+                       new = max(blk_start, min(map_end, end) + 1);
                        if (new != blk_start) {
                                blk_start = new;
                                goto retry;
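
[Annotation, not part of the patch: the busy accounting in this and the following hunks widens each allocation outward to whole alignment units (ALIGN_DOWN on the start, ALIGN on the end), while the pmem-reserve scan in a later hunk trims free space inward (ALIGN on the start, ALIGN_DOWN on the end), so partial units always count as busy and never as free. A compilable sketch of the arithmetic, with made-up values and the kernel's power-of-two rounding macros re-derived locally:]

#include <stdio.h>

/* power-of-two rounding, equivalent to the kernel's ALIGN/ALIGN_DOWN */
#define ALIGN_DOWN(x, a)       ((x) & ~((unsigned long long)(a) - 1))
#define ALIGN(x, a)            ALIGN_DOWN((x) + (a) - 1, a)

int main(void)
{
        unsigned long long align = 0x400000ULL;     /* 4M per-DIMM units */
        unsigned long long res_start = 0x480000ULL; /* unaligned range */
        unsigned long long res_end = 0x9fffffULL;

        /* busy accounting: widen outward to whole units */
        unsigned long long busy_start = ALIGN_DOWN(res_start, align);    /* 0x400000 */
        unsigned long long busy_end = ALIGN(res_end + 1, align) - 1;     /* 0xbfffff */

        /* free-space accounting: trim inward to whole units */
        unsigned long long free_start = ALIGN(res_start, align);         /* 0x800000 */
        unsigned long long free_end = ALIGN_DOWN(res_end + 1, align) - 1; /* 0x7fffff */

        /* a trimmed range with end < start is empty, matching the driver's
         * "if (end < start) continue;" check */
        printf("busy [%#llx-%#llx] free [%#llx-%#llx]\n",
               busy_start, busy_end, free_start, free_end);
        return 0;
}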
@@ -532,6 +808,7 @@
                .res = NULL,
        };
        struct resource *res;
+       unsigned long align;
 
        if (!ndd)
                return 0;
@@ -539,10 +816,20 @@
        device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
 
        /* now account for busy blk allocations in unaliased dpa */
+       align = dpa_align(nd_region);
+       if (!align)
+               return 0;
        for_each_dpa_resource(ndd, res) {
+               resource_size_t start, end, size;
+
                if (strncmp(res->name, "blk", 3) != 0)
                        continue;
-               info.available -= resource_size(res);
+               start = ALIGN_DOWN(res->start, align);
+               end = ALIGN(res->end + 1, align) - 1;
+               size = end - start + 1;
+               if (size >= info.available)
+                       return 0;
+               info.available -= size;
        }
 
        return info.available;
@@ -561,19 +848,31 @@
        struct nvdimm_bus *nvdimm_bus;
        resource_size_t max = 0;
        struct resource *res;
+       unsigned long align;
 
        /* if a dimm is disabled the available capacity is zero */
        if (!ndd)
+               return 0;
+
+       align = dpa_align(nd_region);
+       if (!align)
                return 0;
 
        nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
                return 0;
        for_each_dpa_resource(ndd, res) {
+               resource_size_t start, end;
+
                if (strcmp(res->name, "pmem-reserve") != 0)
                        continue;
-               if (resource_size(res) > max)
-                       max = resource_size(res);
+               /* trim free space relative to current alignment setting */
+               start = ALIGN(res->start, align);
+               end = ALIGN_DOWN(res->end + 1, align) - 1;
+               if (end < start)
+                       continue;
+               if (end - start + 1 > max)
+                       max = end - start + 1;
        }
        release_free_pmem(nvdimm_bus, nd_mapping);
        return max;
@@ -601,24 +900,33 @@
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;
        const char *reason;
+       unsigned long align;
 
        if (!ndd)
+               return 0;
+
+       align = dpa_align(nd_region);
+       if (!align)
                return 0;
 
        map_start = nd_mapping->start;
        map_end = map_start + nd_mapping->size - 1;
        blk_start = max(map_start, map_end + 1 - *overlap);
        for_each_dpa_resource(ndd, res) {
-               if (res->start >= map_start && res->start < map_end) {
+               resource_size_t start, end;
+
+               start = ALIGN_DOWN(res->start, align);
+               end = ALIGN(res->end + 1, align) - 1;
+               if (start >= map_start && start < map_end) {
                        if (strncmp(res->name, "blk", 3) == 0)
                                blk_start = min(blk_start,
-                                               max(map_start, res->start));
-                       else if (res->end > map_end) {
+                                               max(map_start, start));
+                       else if (end > map_end) {
                                reason = "misaligned to iset";
                                goto err;
                        } else
-                               busy += resource_size(res);
-               } else if (res->end >= map_start && res->end <= map_end) {
+                               busy += end - start + 1;
+               } else if (end >= map_start && end <= map_end) {
                        if (strncmp(res->name, "blk", 3) == 0) {
                                /*
                                 * If a BLK allocation overlaps the start of
@@ -627,8 +935,8 @@
                                 */
                                blk_start = map_start;
                        } else
-                               busy += resource_size(res);
-               } else if (map_start > res->start && map_start < res->end) {
+                               busy += end - start + 1;
+               } else if (map_start > start && map_start < end) {
                        /* total eclipse of the mapping */
                        busy += nd_mapping->size;
                        blk_start = map_start;
@@ -638,7 +946,7 @@
        *overlap = map_end + 1 - blk_start;
        available = blk_start - map_start;
        if (busy < available)
-               return available - busy;
+               return ALIGN_DOWN(available - busy, align);
        return 0;
 
 err: