forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/acpi/nfit/core.c
--- a/kernel/drivers/acpi/nfit/core.c
+++ b/kernel/drivers/acpi/nfit/core.c
@@ -1,18 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
  */
 #include <linux/list_sort.h>
 #include <linux/libnvdimm.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
 #include <linux/mutex.h>
 #include <linux/ndctl.h>
 #include <linux/sysfs.h>
@@ -24,6 +17,7 @@
 #include <linux/nd.h>
 #include <asm/cacheflush.h>
 #include <acpi/nfit.h>
+#include "intel.h"
 #include "nfit.h"
 
 /*
@@ -54,6 +48,10 @@
 module_param(no_init_ars, bool, 0644);
 MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");
 
+static bool force_labels;
+module_param(force_labels, bool, 0444);
+MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");
+
 LIST_HEAD(acpi_descs);
 DEFINE_MUTEX(acpi_desc_lock);
 
@@ -76,10 +74,16 @@
 }
 EXPORT_SYMBOL(to_nfit_uuid);
 
-static struct acpi_nfit_desc *to_acpi_nfit_desc(
-		struct nvdimm_bus_descriptor *nd_desc)
+static const guid_t *to_nfit_bus_uuid(int family)
 {
-	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
+	if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT,
+			"only secondary bus families can be translated\n"))
+		return NULL;
+	/*
+	 * The index of bus UUIDs starts immediately following the last
+	 * NVDIMM/leaf family.
+	 */
+	return to_nfit_uuid(family + NVDIMM_FAMILY_MAX);
 }
 
 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
@@ -191,18 +195,20 @@
 		 * In the _LSI, _LSR, _LSW case the locked status is
 		 * communicated via the read/write commands
 		 */
-		if (nfit_mem->has_lsr)
+		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
 			break;
 
 		if (status >> 16 & ND_CONFIG_LOCKED)
 			return -EACCES;
 		break;
 	case ND_CMD_GET_CONFIG_DATA:
-		if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED)
+		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
+				&& status == ACPI_LABELS_LOCKED)
 			return -EACCES;
 		break;
 	case ND_CMD_SET_CONFIG_DATA:
-		if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED)
+		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
+				&& status == ACPI_LABELS_LOCKED)
 			return -EACCES;
 		break;
 	default:
@@ -367,23 +373,17 @@
 
 static u8 nfit_dsm_revid(unsigned family, unsigned func)
 {
-	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
+	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
 		[NVDIMM_FAMILY_INTEL] = {
-			[NVDIMM_INTEL_GET_MODES] = 2,
-			[NVDIMM_INTEL_GET_FWINFO] = 2,
-			[NVDIMM_INTEL_START_FWUPDATE] = 2,
-			[NVDIMM_INTEL_SEND_FWUPDATE] = 2,
-			[NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
-			[NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
-			[NVDIMM_INTEL_SET_THRESHOLD] = 2,
-			[NVDIMM_INTEL_INJECT_ERROR] = 2,
+			[NVDIMM_INTEL_GET_MODES ...
+				NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2,
 		},
 	};
 	u8 id;
 
 	if (family > NVDIMM_FAMILY_MAX)
 		return 0;
-	if (func > 31)
+	if (func > NVDIMM_CMD_MAX)
 		return 0;
 	id = revid_table[family][func];
 	if (id == 0)
@@ -391,8 +391,19 @@
 	return id;
 }
 
+static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
+{
+	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
+	if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL
+			&& func >= NVDIMM_INTEL_GET_SECURITY_STATE
+			&& func <= NVDIMM_INTEL_MASTER_SECURE_ERASE)
+		return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG);
+	return true;
+}
+
 static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
-		struct nd_cmd_pkg *call_pkg)
+		struct nd_cmd_pkg *call_pkg, int *family)
 {
 	if (call_pkg) {
 		int i;
@@ -403,6 +414,7 @@
 		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
 			if (call_pkg->nd_reserved2[i])
 				return -EINVAL;
+		*family = call_pkg->nd_family;
 		return call_pkg->nd_command;
 	}
 
@@ -424,7 +436,7 @@
 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
 {
-	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
 	union acpi_object in_obj, in_buf, *out_obj;
 	const struct nd_cmd_desc *desc = NULL;
@@ -436,13 +448,14 @@
 	acpi_handle handle;
 	const guid_t *guid;
 	int func, rc, i;
+	int family = 0;
 
 	if (cmd_rc)
 		*cmd_rc = -EINVAL;
 
 	if (cmd == ND_CMD_CALL)
 		call_pkg = buf;
-	func = cmd_to_func(nfit_mem, cmd, call_pkg);
+	func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
 	if (func < 0)
 		return func;
 
@@ -464,9 +477,20 @@
 
 		cmd_name = nvdimm_bus_cmd_name(cmd);
 		cmd_mask = nd_desc->cmd_mask;
-		dsm_mask = nd_desc->bus_dsm_mask;
+		if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
+			family = call_pkg->nd_family;
+			if (family > NVDIMM_BUS_FAMILY_MAX ||
+			    !test_bit(family, &nd_desc->bus_family_mask))
+				return -EINVAL;
+			family = array_index_nospec(family,
+					NVDIMM_BUS_FAMILY_MAX + 1);
+			dsm_mask = acpi_desc->family_dsm_mask[family];
+			guid = to_nfit_bus_uuid(family);
+		} else {
+			dsm_mask = acpi_desc->bus_dsm_mask;
+			guid = to_nfit_uuid(NFIT_DEV_BUS);
+		}
 		desc = nd_cmd_bus_desc(cmd);
-		guid = to_nfit_uuid(NFIT_DEV_BUS);
 		handle = adev->handle;
 		dimm_name = "bus";
 	}
@@ -478,7 +502,8 @@
 	 * Check for a valid command. For ND_CMD_CALL, we also have to
 	 * make sure that the DSM function is supported.
 	 */
-	if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
+	if (cmd == ND_CMD_CALL &&
+	    (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask)))
 		return -ENOTTY;
 	else if (!test_bit(cmd, &cmd_mask))
 		return -ENOTTY;
@@ -501,21 +526,24 @@
 		in_buf.buffer.length = call_pkg->nd_size_in;
 	}
 
-	dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
-		dimm_name, cmd, func, in_buf.buffer.length);
-	print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
-			in_buf.buffer.pointer,
-			min_t(u32, 256, in_buf.buffer.length), true);
+	dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n",
+		dimm_name, cmd, family, func, in_buf.buffer.length);
+	if (payload_dumpable(nvdimm, func))
+		print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
+				in_buf.buffer.pointer,
+				min_t(u32, 256, in_buf.buffer.length), true);
 
 	/* call the BIOS, prefer the named methods over _DSM if available */
-	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsr)
+	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
+			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
 		out_obj = acpi_label_info(handle);
-	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
+	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
+			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
 		struct nd_cmd_get_config_data_hdr *p = buf;
 
 		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
 	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
-			&& nfit_mem->has_lsw) {
+			&& test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
 		struct nd_cmd_set_config_hdr *p = buf;
 
 		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
@@ -1220,8 +1248,9 @@
 {
 	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
-	return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
+	return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
 }
 static struct device_attribute dev_attr_bus_dsm_mask =
 		__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);
@@ -1265,7 +1294,7 @@
 	if (rc)
 		return rc;
 
-	device_lock(dev);
+	nfit_device_lock(dev);
 	nd_desc = dev_get_drvdata(dev);
 	if (nd_desc) {
 		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
@@ -1282,7 +1311,7 @@
 			break;
 		}
 	}
-	device_unlock(dev);
+	nfit_device_unlock(dev);
 	if (rc)
 		return rc;
 	return size;
@@ -1302,10 +1331,10 @@
 	ssize_t rc = -ENXIO;
 	bool busy;
 
-	device_lock(dev);
+	nfit_device_lock(dev);
 	nd_desc = dev_get_drvdata(dev);
 	if (!nd_desc) {
-		device_unlock(dev);
+		nfit_device_unlock(dev);
 		return rc;
 	}
 	acpi_desc = to_acpi_desc(nd_desc);
@@ -1322,7 +1351,7 @@
 	}
 
 	mutex_unlock(&acpi_desc->init_mutex);
-	device_unlock(dev);
+	nfit_device_unlock(dev);
 	return rc;
 }
 
@@ -1339,14 +1368,14 @@
 	if (val != 1)
 		return -EINVAL;
 
-	device_lock(dev);
+	nfit_device_lock(dev);
 	nd_desc = dev_get_drvdata(dev);
 	if (nd_desc) {
 		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
 		rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
 	}
-	device_unlock(dev);
+	nfit_device_unlock(dev);
 	if (rc)
 		return rc;
 	return size;
@@ -1364,11 +1393,15 @@
 
 static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
 
-	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
-		return 0;
+	if (a == &dev_attr_scrub.attr)
+		return ars_supported(nvdimm_bus) ? a->mode : 0;
+
+	if (a == &dev_attr_firmware_activate_noidle.attr)
+		return intel_fwa_supported(nvdimm_bus) ? a->mode : 0;
+
 	return a->mode;
 }
 
@@ -1377,6 +1410,7 @@
 	&dev_attr_scrub.attr,
 	&dev_attr_hw_error_scrub.attr,
 	&dev_attr_bus_dsm_mask.attr,
+	&dev_attr_firmware_activate_noidle.attr,
 	NULL,
 };
 
@@ -1387,7 +1421,6 @@
 };
 
 static const struct attribute_group *acpi_nfit_attribute_groups[] = {
-	&nvdimm_bus_attribute_group,
 	&acpi_nfit_attribute_group,
 	NULL,
 };
@@ -1588,7 +1621,12 @@
 static ssize_t flags_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	u16 flags = to_nfit_memdev(dev)->flags;
+	struct nvdimm *nvdimm = to_nvdimm(dev);
+	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+	u16 flags = __to_nfit_memdev(nfit_mem)->flags;
+
+	if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
+		flags |= ACPI_NFIT_MEM_FLUSH_FAILED;
 
 	return sprintf(buf, "%s%s%s%s%s%s%s\n",
 		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
@@ -1604,20 +1642,22 @@
 static ssize_t id_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+	struct nvdimm *nvdimm = to_nvdimm(dev);
+	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
 
-	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
-		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
-				be16_to_cpu(dcr->vendor_id),
-				dcr->manufacturing_location,
-				be16_to_cpu(dcr->manufacturing_date),
-				be32_to_cpu(dcr->serial_number));
-	else
-		return sprintf(buf, "%04x-%08x\n",
-				be16_to_cpu(dcr->vendor_id),
-				be32_to_cpu(dcr->serial_number));
+	return sprintf(buf, "%s\n", nfit_mem->id);
 }
 static DEVICE_ATTR_RO(id);
+
+static ssize_t dirty_shutdown_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvdimm *nvdimm = to_nvdimm(dev);
+	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
+	return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
+}
+static DEVICE_ATTR_RO(dirty_shutdown);
 
 static struct attribute *acpi_nfit_dimm_attributes[] = {
 	&dev_attr_handle.attr,
@@ -1636,14 +1676,16 @@
 	&dev_attr_id.attr,
 	&dev_attr_family.attr,
 	&dev_attr_dsm_mask.attr,
+	&dev_attr_dirty_shutdown.attr,
 	NULL,
 };
 
 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
 		struct attribute *a, int n)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct nvdimm *nvdimm = to_nvdimm(dev);
+	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
 
 	if (!to_nfit_dcr(dev)) {
 		/* Without a dcr only the memdev attributes can be surfaced */
@@ -1657,6 +1699,11 @@
 
 	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
 		return 0;
+
+	if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
+			&& a == &dev_attr_dirty_shutdown.attr)
+		return 0;
+
 	return a->mode;
 }
 
@@ -1667,8 +1714,6 @@
 };
 
 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
-	&nvdimm_attribute_group,
-	&nd_device_attribute_group,
 	&acpi_nfit_dimm_attribute_group,
 	NULL,
 };
@@ -1718,9 +1763,9 @@
 	struct acpi_device *adev = data;
 	struct device *dev = &adev->dev;
 
-	device_lock(dev->parent);
+	nfit_device_lock(dev->parent);
 	__acpi_nvdimm_notify(dev, event);
-	device_unlock(dev->parent);
+	nfit_device_unlock(dev->parent);
 }
 
 static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
@@ -1735,22 +1780,96 @@
 	return false;
 }
 
+__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
+{
+	struct device *dev = &nfit_mem->adev->dev;
+	struct nd_intel_smart smart = { 0 };
+	union acpi_object in_buf = {
+		.buffer.type = ACPI_TYPE_BUFFER,
+		.buffer.length = 0,
+	};
+	union acpi_object in_obj = {
+		.package.type = ACPI_TYPE_PACKAGE,
+		.package.count = 1,
+		.package.elements = &in_buf,
+	};
+	const u8 func = ND_INTEL_SMART;
+	const guid_t *guid = to_nfit_uuid(nfit_mem->family);
+	u8 revid = nfit_dsm_revid(nfit_mem->family, func);
+	struct acpi_device *adev = nfit_mem->adev;
+	acpi_handle handle = adev->handle;
+	union acpi_object *out_obj;
+
+	if ((nfit_mem->dsm_mask & (1 << func)) == 0)
+		return;
+
+	out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
+	if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
+			|| out_obj->buffer.length < sizeof(smart)) {
+		dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
+				dev_name(dev));
+		ACPI_FREE(out_obj);
+		return;
+	}
+	memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
+	ACPI_FREE(out_obj);
+
+	if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
+		if (smart.shutdown_state)
+			set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
+	}
+
+	if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
+		set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
+		nfit_mem->dirty_shutdown = smart.shutdown_count;
+	}
+}
+
+static void populate_shutdown_status(struct nfit_mem *nfit_mem)
+{
+	/*
+	 * For DIMMs that provide a dynamic facility to retrieve a
+	 * dirty-shutdown status and/or a dirty-shutdown count, cache
+	 * these values in nfit_mem.
+	 */
+	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
+		nfit_intel_shutdown_status(nfit_mem);
+}
+
 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_mem *nfit_mem, u32 device_handle)
 {
+	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
 	struct acpi_device *adev, *adev_dimm;
 	struct device *dev = acpi_desc->dev;
 	unsigned long dsm_mask, label_mask;
 	const guid_t *guid;
 	int i;
 	int family = -1;
+	struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
 
 	/* nfit test assumes 1:1 relationship between commands and dsms */
 	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
 	nfit_mem->family = NVDIMM_FAMILY_INTEL;
+	set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
+
+	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
+		sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x",
+				be16_to_cpu(dcr->vendor_id),
+				dcr->manufacturing_location,
+				be16_to_cpu(dcr->manufacturing_date),
+				be32_to_cpu(dcr->serial_number));
+	else
+		sprintf(nfit_mem->id, "%04x-%08x",
+				be16_to_cpu(dcr->vendor_id),
+				be32_to_cpu(dcr->serial_number));
+
 	adev = to_acpi_dev(acpi_desc);
-	if (!adev)
+	if (!adev) {
+		/* unit test case */
+		populate_shutdown_status(nfit_mem);
 		return 0;
+	}
 
 	adev_dimm = acpi_find_child_device(adev, device_handle, false);
 	nfit_mem->adev = adev_dimm;
@@ -1785,10 +1904,13 @@
 	 * Note, that checking for function0 (bit0) tells us if any commands
 	 * are reachable through this GUID.
 	 */
+	clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
 	for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
-		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
+		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) {
+			set_bit(i, &nd_desc->dimm_family_mask);
 			if (family < 0 || i == default_dsm_family)
 				family = i;
+		}
 
 	/* limit the supported commands to those that are publicly documented */
 	nfit_mem->family = family;
@@ -1837,18 +1959,35 @@
 		| 1 << ND_CMD_SET_CONFIG_DATA;
 	if (family == NVDIMM_FAMILY_INTEL
 			&& (dsm_mask & label_mask) == label_mask)
-		return 0;
+		/* skip _LS{I,R,W} enabling */;
+	else {
+		if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
+				&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
+			dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
+			set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
+		}
 
-	if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
-			&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
-		dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
-		nfit_mem->has_lsr = true;
+		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
+				&& acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
+			dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
+			set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
+		}
+
+		/*
+		 * Quirk read-only label configurations to preserve
+		 * access to label-less namespaces by default.
+		 */
+		if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
+				&& !force_labels) {
+			dev_dbg(dev, "%s: No _LSW, disable labels\n",
+					dev_name(&adev_dimm->dev));
+			clear_bit(NFIT_MEM_LSR, &nfit_mem->flags);
+		} else
+			dev_dbg(dev, "%s: Force enable labels\n",
+					dev_name(&adev_dimm->dev));
 	}
 
-	if (nfit_mem->has_lsr && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
-		dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
-		nfit_mem->has_lsw = true;
-	}
+	populate_shutdown_status(nfit_mem);
 
 	return 0;
 }
@@ -1879,6 +2018,36 @@
 	mutex_unlock(&acpi_desc->init_mutex);
 }
 
+static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
+{
+	switch (family) {
+	case NVDIMM_FAMILY_INTEL:
+		return intel_security_ops;
+	default:
+		return NULL;
+	}
+}
+
+static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops(
+		struct nfit_mem *nfit_mem)
+{
+	unsigned long mask;
+	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+
+	if (!nd_desc->fw_ops)
+		return NULL;
+
+	if (nfit_mem->family != NVDIMM_FAMILY_INTEL)
+		return NULL;
+
+	mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;
+	if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
+		return NULL;
+
+	return intel_fw_ops;
+}
+
 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
 {
 	struct nfit_mem *nfit_mem;
@@ -1899,8 +2068,10 @@
 			continue;
 		}
 
-		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
+		if (nfit_mem->bdw && nfit_mem->memdev_pmem) {
 			set_bit(NDD_ALIASING, &flags);
+			set_bit(NDD_LABELING, &flags);
+		}
 
 		/* collate flags across all memdevs for this dimm */
 		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -1936,19 +2107,25 @@
 			cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
 		}
 
-		if (nfit_mem->has_lsr) {
+		/* Quirk to ignore LOCAL for labels on HYPERV DIMMs */
+		if (nfit_mem->family == NVDIMM_FAMILY_HYPERV)
+			set_bit(NDD_NOBLK, &flags);
+
+		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
 			set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
 			set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
 		}
-		if (nfit_mem->has_lsw)
+		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
 			set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
 
 		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
 			: NULL;
-		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
+		nvdimm = __nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
 				acpi_nfit_dimm_attribute_groups,
 				flags, cmd_mask, flush ? flush->hint_count : 0,
-				nfit_mem->flush_wpq);
+				nfit_mem->flush_wpq, &nfit_mem->id[0],
+				acpi_nfit_get_security_ops(nfit_mem->family),
+				acpi_nfit_get_fw_ops(nfit_mem));
 		if (!nvdimm)
 			return -ENOMEM;
 
@@ -1958,7 +2135,7 @@
 		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
 			continue;
 
-		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
+		dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n",
 			nvdimm_name(nvdimm),
 			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
 			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
@@ -2002,22 +2179,33 @@
  * these commands.
  */
 enum nfit_aux_cmds {
-        NFIT_CMD_TRANSLATE_SPA = 5,
-        NFIT_CMD_ARS_INJECT_SET = 7,
-        NFIT_CMD_ARS_INJECT_CLEAR = 8,
-        NFIT_CMD_ARS_INJECT_GET = 9,
+	NFIT_CMD_TRANSLATE_SPA = 5,
+	NFIT_CMD_ARS_INJECT_SET = 7,
+	NFIT_CMD_ARS_INJECT_CLEAR = 8,
+	NFIT_CMD_ARS_INJECT_GET = 9,
 };
 
 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
 {
 	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
 	const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
+	unsigned long dsm_mask, *mask;
 	struct acpi_device *adev;
-	unsigned long dsm_mask;
 	int i;
 
-	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
-	nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
+	set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
+	set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask);
+
+	/* enable nfit_test to inject bus command emulation */
+	if (acpi_desc->bus_cmd_force_en) {
+		nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
+		mask = &nd_desc->bus_family_mask;
+		if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) {
+			set_bit(NVDIMM_BUS_FAMILY_INTEL, mask);
+			nd_desc->fw_ops = intel_bus_fw_ops;
+		}
+	}
+
 	adev = to_acpi_dev(acpi_desc);
 	if (!adev)
 		return;
@@ -2025,7 +2213,6 @@
 	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
 		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
 			set_bit(i, &nd_desc->cmd_mask);
-	set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
 
 	dsm_mask =
 		(1 << ND_CMD_ARS_CAP) |
@@ -2038,7 +2225,20 @@
 		(1 << NFIT_CMD_ARS_INJECT_GET);
 	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
 		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
-			set_bit(i, &nd_desc->bus_dsm_mask);
+			set_bit(i, &acpi_desc->bus_dsm_mask);
+
+	/* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */
+	dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
+	guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL);
+	mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
+	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
+		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
+			set_bit(i, mask);
+
+	if (*mask == dsm_mask) {
+		set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask);
+		nd_desc->fw_ops = intel_bus_fw_ops;
+	}
 }
 
 static ssize_t range_index_show(struct device *dev,
@@ -2062,10 +2262,6 @@
 };
 
 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
-	&nd_region_attribute_group,
-	&nd_mapping_attribute_group,
-	&nd_device_attribute_group,
-	&nd_numa_attribute_group,
 	&acpi_nfit_region_attribute_group,
 	NULL,
 };
@@ -2162,8 +2358,7 @@
 	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
 	if (!nd_set)
 		return -ENOMEM;
-	ndr_desc->nd_set = nd_set;
-	guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
+	import_guid(&nd_set->type_guid, spa->range_guid);
 
 	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
 	if (!info)
@@ -2441,7 +2636,7 @@
 	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
 	mmio = &nfit_blk->mmio[BDW];
 	mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
-                        nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
+			nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
 	if (!mmio->addr.base) {
 		dev_dbg(dev, "%s failed to map bdw\n",
 			nvdimm_name(nvdimm));
@@ -2814,11 +3009,25 @@
 	ndr_desc->res = &res;
 	ndr_desc->provider_data = nfit_spa;
 	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
-	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
-		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
-				spa->proximity_domain);
-	else
+	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) {
+		ndr_desc->numa_node = pxm_to_online_node(spa->proximity_domain);
+		ndr_desc->target_node = pxm_to_node(spa->proximity_domain);
+	} else {
 		ndr_desc->numa_node = NUMA_NO_NODE;
+		ndr_desc->target_node = NUMA_NO_NODE;
+	}
+
+	/* Fallback to address based numa information if node lookup failed */
+	if (ndr_desc->numa_node == NUMA_NO_NODE) {
+		ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
+		dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
+			NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+	}
+	if (ndr_desc->target_node == NUMA_NO_NODE) {
+		ndr_desc->target_node = phys_to_target_node(spa->address);
+		dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
+			NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+	}
 
 	/*
 	 * Persistence domain bits are hierarchical, if
@@ -3142,7 +3351,7 @@
 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
 {
 	struct nfit_spa *nfit_spa;
-	int rc;
+	int rc, do_sched_ars = 0;
 
 	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
 	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
@@ -3154,7 +3363,7 @@
 		}
 	}
 
-	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
 		switch (nfit_spa_type(nfit_spa->spa)) {
 		case NFIT_SPA_VOLATILE:
 		case NFIT_SPA_PM:
@@ -3162,6 +3371,13 @@
 			rc = ars_register(acpi_desc, nfit_spa);
 			if (rc)
 				return rc;
+
+			/*
+			 * Kick off background ARS if at least one
+			 * region successfully registered ARS
+			 */
+			if (!test_bit(ARS_FAILED, &nfit_spa->ars_state))
+				do_sched_ars++;
 			break;
 		case NFIT_SPA_BDW:
 			/* nothing to register */
@@ -3180,8 +3396,10 @@
 			/* don't register unknown regions */
 			break;
 		}
+	}
 
-	sched_ars(acpi_desc);
+	if (do_sched_ars)
+		sched_ars(acpi_desc);
 	return 0;
 }
 
@@ -3318,12 +3536,12 @@
 
 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
 {
-	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 	struct device *dev = acpi_desc->dev;
 
 	/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
-	device_lock(dev);
-	device_unlock(dev);
+	nfit_device_lock(dev);
+	nfit_device_unlock(dev);
 
 	/* Bounce the init_mutex to complete initial registration */
 	mutex_lock(&acpi_desc->init_mutex);
@@ -3332,10 +3550,10 @@
 	return 0;
 }
 
-static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
+static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
 		struct nvdimm *nvdimm, unsigned int cmd)
 {
-	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
 	if (nvdimm)
 		return 0;
@@ -3345,13 +3563,39 @@
 	/*
 	 * The kernel and userspace may race to initiate a scrub, but
 	 * the scrub thread is prepared to lose that initial race. It
-	 * just needs guarantees that any ars it initiates are not
-	 * interrupted by any intervening start reqeusts from userspace.
+	 * just needs guarantees that any ARS it initiates are not
+	 * interrupted by any intervening start requests from userspace.
 	 */
 	if (work_busy(&acpi_desc->dwork.work))
 		return -EBUSY;
 
 	return 0;
+}
+
+/*
+ * Prevent security and firmware activate commands from being issued via
+ * ioctl.
+ */
+static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
+		struct nvdimm *nvdimm, unsigned int cmd, void *buf)
+{
+	struct nd_cmd_pkg *call_pkg = buf;
+	unsigned int func;
+
+	if (nvdimm && cmd == ND_CMD_CALL &&
+			call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
+		func = call_pkg->nd_command;
+		if (func > NVDIMM_CMD_MAX ||
+		    (1 << func) & NVDIMM_INTEL_DENY_CMDMASK)
+			return -EOPNOTSUPP;
+	}
+
+	/* block all non-nfit bus commands */
+	if (!nvdimm && cmd == ND_CMD_CALL &&
+			call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT)
+		return -EOPNOTSUPP;
+
+	return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
 }
 
 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
@@ -3450,8 +3694,8 @@
 	 * acpi_nfit_ars_rescan() submissions have had a chance to
 	 * either submit or see ->cancel set.
 	 */
-	device_lock(bus_dev);
-	device_unlock(bus_dev);
+	nfit_device_lock(bus_dev);
+	nfit_device_unlock(bus_dev);
 
 	flush_workqueue(nfit_wq);
 }
@@ -3469,7 +3713,13 @@
 
 	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
 	if (ACPI_FAILURE(status)) {
-		/* This is ok, we could have an nvdimm hotplugged later */
+		/* The NVDIMM root device allows OS to trigger enumeration of
+		 * NVDIMMs through NFIT at boot time and re-enumeration at
+		 * root level via the _FIT method during runtime.
+		 * This is ok to return 0 here, we could have an nvdimm
+		 * hotplugged later and evaluate _FIT method which returns
+		 * data in the format of a series of NFIT Structures.
+		 */
 		dev_dbg(dev, "failed to find NFIT at startup\n");
 		return 0;
 	}
@@ -3588,9 +3838,9 @@
 
 static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 {
-	device_lock(&adev->dev);
+	nfit_device_lock(&adev->dev);
 	__acpi_nfit_notify(&adev->dev, adev->handle, event);
-	device_unlock(&adev->dev);
+	nfit_device_unlock(&adev->dev);
 }
 
 static const struct acpi_device_id acpi_nfit_ids[] = {
@@ -3636,6 +3886,7 @@
 	guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
 	guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
 	guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
+	guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]);
 
 	nfit_wq = create_singlethread_workqueue("nfit");
 	if (!nfit_wq)