| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. |
|---|
| 3 | | - * |
|---|
| 4 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 5 | | - * it under the terms of version 2 of the GNU General Public License as |
|---|
| 6 | | - * published by the Free Software Foundation. |
|---|
| 7 | | - * |
|---|
| 8 | | - * This program is distributed in the hope that it will be useful, but |
|---|
| 9 | | - * WITHOUT ANY WARRANTY; without even the implied warranty of |
|---|
| 10 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|---|
| 11 | | - * General Public License for more details. |
|---|
| 12 | 4 | */ |
|---|
| 13 | 5 | #include <linux/device.h> |
|---|
| 14 | 6 | #include <linux/ndctl.h> |
|---|
| .. | .. |
|---|
| 77 | 69 | /* |
|---|
| 78 | 70 | * Per UEFI 2.7, the minimum size of the Label Storage Area is large |
|---|
| 79 | 71 | * enough to hold 2 index blocks and 2 labels. The minimum index |
|---|
| 80 | | - * block size is 256 bytes, and the minimum label size is 256 bytes. |
|---|
| 72 | + * block size is 256 bytes. The label size is 128 for namespaces |
|---|
| 73 | + * prior to version 1.2 and at minimum 256 for version 1.2 and later. |
|---|
| 81 | 74 | */ |
|---|
| 82 | 75 | nslot = nvdimm_num_label_slots(ndd); |
|---|
| 83 | 76 | space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd); |
|---|
| .. | .. |
|---|
| 185 | 178 | __le64_to_cpu(nsindex[i]->otheroff)); |
|---|
| 186 | 179 | continue; |
|---|
| 187 | 180 | } |
|---|
| 181 | + if (__le64_to_cpu(nsindex[i]->labeloff) |
|---|
| 182 | + != 2 * sizeof_namespace_index(ndd)) { |
|---|
| 183 | + dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n", |
|---|
| 184 | + i, (unsigned long long) |
|---|
| 185 | + __le64_to_cpu(nsindex[i]->labeloff)); |
|---|
| 186 | + continue; |
|---|
| 187 | + } |
|---|
| 188 | 188 | |
|---|
| 189 | 189 | size = __le64_to_cpu(nsindex[i]->mysize); |
|---|
| 190 | 190 | if (size > sizeof_namespace_index(ndd) |
|---|
| .. | .. |
|---|
| 229 | 229 | return -1; |
|---|
| 230 | 230 | } |
|---|
| 231 | 231 | |
|---|
| 232 | | -int nd_label_validate(struct nvdimm_drvdata *ndd) |
|---|
| 232 | +static int nd_label_validate(struct nvdimm_drvdata *ndd) |
|---|
| 233 | 233 | { |
|---|
| 234 | 234 | /* |
|---|
| 235 | 235 | * In order to probe for and validate namespace index blocks we |
|---|
| .. | .. |
|---|
| 252 | 252 | return -1; |
|---|
| 253 | 253 | } |
|---|
| 254 | 254 | |
|---|
| 255 | | -void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst, |
|---|
| 256 | | - struct nd_namespace_index *src) |
|---|
| 255 | +static void nd_label_copy(struct nvdimm_drvdata *ndd, |
|---|
| 256 | + struct nd_namespace_index *dst, |
|---|
| 257 | + struct nd_namespace_index *src) |
|---|
| 257 | 258 | { |
|---|
| 258 | | - if (dst && src) |
|---|
| 259 | | - /* pass */; |
|---|
| 260 | | - else |
|---|
| 259 | + /* just exit if either destination or source is NULL */ |
|---|
| 260 | + if (!dst || !src) |
|---|
| 261 | 261 | return; |
|---|
| 262 | 262 | |
|---|
| 263 | 263 | memcpy(dst, src, sizeof_namespace_index(ndd)); |
|---|
| .. | .. |
|---|
| 353 | 353 | if (slot != __le32_to_cpu(nd_label->slot)) |
|---|
| 354 | 354 | return false; |
|---|
| 355 | 355 | |
|---|
| 356 | | - /* check that DPA allocations are page aligned */ |
|---|
| 357 | | - if ((__le64_to_cpu(nd_label->dpa) |
|---|
| 358 | | - | __le64_to_cpu(nd_label->rawsize)) % SZ_4K) |
|---|
| 359 | | - return false; |
|---|
| 360 | | - |
|---|
| 361 | 356 | /* check checksum */ |
|---|
| 362 | 357 | if (namespace_label_has(ndd, checksum)) { |
|---|
| 363 | 358 | u64 sum, sum_save; |
|---|
| .. | .. |
|---|
| 386 | 381 | return 0; /* no label, nothing to reserve */ |
|---|
| 387 | 382 | |
|---|
| 388 | 383 | for_each_clear_bit_le(slot, free, nslot) { |
|---|
| 384 | + struct nvdimm *nvdimm = to_nvdimm(ndd->dev); |
|---|
| 389 | 385 | struct nd_namespace_label *nd_label; |
|---|
| 390 | 386 | struct nd_region *nd_region = NULL; |
|---|
| 391 | 387 | u8 label_uuid[NSLABEL_UUID_LEN]; |
|---|
| .. | .. |
|---|
| 400 | 396 | |
|---|
| 401 | 397 | memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN); |
|---|
| 402 | 398 | flags = __le32_to_cpu(nd_label->flags); |
|---|
| 399 | + if (test_bit(NDD_NOBLK, &nvdimm->flags)) |
|---|
| 400 | + flags &= ~NSLABEL_FLAG_LOCAL; |
|---|
| 403 | 401 | nd_label_gen_id(&label_id, label_uuid, flags); |
|---|
| 404 | 402 | res = nvdimm_allocate_dpa(ndd, &label_id, |
|---|
| 405 | 403 | __le64_to_cpu(nd_label->dpa), |
|---|
| .. | .. |
|---|
| 410 | 408 | } |
|---|
| 411 | 409 | |
|---|
| 412 | 410 | return 0; |
|---|
| 411 | +} |
|---|
| 412 | + |
|---|
| 413 | +int nd_label_data_init(struct nvdimm_drvdata *ndd) |
|---|
| 414 | +{ |
|---|
| 415 | + size_t config_size, read_size, max_xfer, offset; |
|---|
| 416 | + struct nd_namespace_index *nsindex; |
|---|
| 417 | + unsigned int i; |
|---|
| 418 | + int rc = 0; |
|---|
| 419 | + u32 nslot; |
|---|
| 420 | + |
|---|
| 421 | + if (ndd->data) |
|---|
| 422 | + return 0; |
|---|
| 423 | + |
|---|
| 424 | + if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) { |
|---|
| 425 | + dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n", |
|---|
| 426 | + ndd->nsarea.max_xfer, ndd->nsarea.config_size); |
|---|
| 427 | + return -ENXIO; |
|---|
| 428 | + } |
|---|
| 429 | + |
|---|
| 430 | + /* |
|---|
| 431 | + * We need to determine the maximum index area as this is the section |
|---|
| 432 | + * we must read and validate before we can start processing labels. |
|---|
| 433 | + * |
|---|
| 434 | + * If the area is too small to contain the two indexes and 2 labels |
|---|
| 435 | + * then we abort. |
|---|
| 436 | + * |
|---|
| 437 | + * Start at a label size of 128 as this should result in the largest |
|---|
| 438 | + * possible namespace index size. |
|---|
| 439 | + */ |
|---|
| 440 | + ndd->nslabel_size = 128; |
|---|
| 441 | + read_size = sizeof_namespace_index(ndd) * 2; |
|---|
| 442 | + if (!read_size) |
|---|
| 443 | + return -ENXIO; |
|---|
| 444 | + |
|---|
| 445 | + /* Allocate a buffer covering the whole label config area */ |
|---|
| 446 | + config_size = ndd->nsarea.config_size; |
|---|
| 447 | + ndd->data = kvzalloc(config_size, GFP_KERNEL); |
|---|
| 448 | + if (!ndd->data) |
|---|
| 449 | + return -ENOMEM; |
|---|
| 450 | + |
|---|
| 451 | + /* |
|---|
| 452 | + * We want to guarantee as few reads as possible while conserving |
|---|
| 453 | + * memory. To do that we figure out how much unused space will be left |
|---|
| 454 | + * in the last read, divide that by the total number of reads it is |
|---|
| 455 | + * going to take given our maximum transfer size, and then reduce our |
|---|
| 456 | + * maximum transfer size based on that result. |
|---|
| 457 | + */ |
|---|
| 458 | + max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size); |
|---|
| 459 | + if (read_size < max_xfer) { |
|---|
| 460 | + /* spread the final read's slack evenly by shrinking max_xfer */ |
|---|
| 461 | + max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) / |
|---|
| 462 | + DIV_ROUND_UP(config_size, max_xfer); |
|---|
| 463 | + /* but never below read_size: both index blocks need one read */ |
|---|
| 464 | + if (max_xfer < read_size) |
|---|
| 465 | + max_xfer = read_size; |
|---|
| 466 | + } |
|---|
| 467 | + |
|---|
| 468 | + /* Make our initial read size a multiple of max_xfer size */ |
|---|
| 469 | + read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer, |
|---|
| 470 | + config_size); |
|---|
| 471 | + |
|---|
| 472 | + /* Read both namespace index blocks in a single pass */ |
|---|
| 473 | + rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size); |
|---|
| 474 | + if (rc) |
|---|
| 475 | + goto out_err; |
|---|
| 476 | + |
|---|
| 477 | + /* Validate index data, if not valid assume all labels are invalid (ndd->data stays allocated — NOTE(review): presumably freed by drvdata teardown, confirm) */ |
|---|
| 478 | + ndd->ns_current = nd_label_validate(ndd); |
|---|
| 479 | + if (ndd->ns_current < 0) |
|---|
| 480 | + return 0; |
|---|
| 481 | + |
|---|
| 482 | + /* Record our index values */ |
|---|
| 483 | + ndd->ns_next = nd_label_next_nsindex(ndd->ns_current); |
|---|
| 484 | + |
|---|
| 485 | + /* Copy "current" index on top of the "next" index */ |
|---|
| 486 | + nsindex = to_current_namespace_index(ndd); |
|---|
| 487 | + nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex); |
|---|
| 488 | + |
|---|
| 489 | + /* Determine starting offset for label data */ |
|---|
| 490 | + offset = __le64_to_cpu(nsindex->labeloff); |
|---|
| 491 | + nslot = __le32_to_cpu(nsindex->nslot); |
|---|
| 492 | + |
|---|
| 493 | + /* Loop through the free list pulling in any active labels */ |
|---|
| 494 | + for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) { |
|---|
| 495 | + size_t label_read_size; |
|---|
| 496 | + |
|---|
| 497 | + /* zero out the unused labels */ |
|---|
| 498 | + if (test_bit_le(i, nsindex->free)) { |
|---|
| 499 | + memset(ndd->data + offset, 0, ndd->nslabel_size); |
|---|
| 500 | + continue; |
|---|
| 501 | + } |
|---|
| 502 | + |
|---|
| 503 | + /* if we already read past here then just continue */ |
|---|
| 504 | + if (offset + ndd->nslabel_size <= read_size) |
|---|
| 505 | + continue; |
|---|
| 506 | + |
|---|
| 507 | + /* skip ahead: earlier reads ended before this label's offset */ |
|---|
| 508 | + if (read_size < offset) |
|---|
| 509 | + read_size = offset; |
|---|
| 510 | + |
|---|
| 511 | + /* size the next read to cover this label, in max_xfer units */ |
|---|
| 512 | + label_read_size = offset + ndd->nslabel_size - read_size; |
|---|
| 513 | + label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) * |
|---|
| 514 | + max_xfer; |
|---|
| 515 | + |
|---|
| 516 | + /* truncate last read if needed */ |
|---|
| 517 | + if (read_size + label_read_size > config_size) |
|---|
| 518 | + label_read_size = config_size - read_size; |
|---|
| 519 | + |
|---|
| 520 | + /* Read the label data */ |
|---|
| 521 | + rc = nvdimm_get_config_data(ndd, ndd->data + read_size, |
|---|
| 522 | + read_size, label_read_size); |
|---|
| 523 | + if (rc) |
|---|
| 524 | + goto out_err; |
|---|
| 525 | + |
|---|
| 526 | + /* push read_size to next read offset */ |
|---|
| 527 | + read_size += label_read_size; |
|---|
| 528 | + } |
|---|
| 529 | + |
|---|
| 530 | + dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc); |
|---|
| 531 | +out_err: |
|---|
| 532 | + return rc; |
|---|
| 413 | 533 | } |
|---|
| 414 | 534 | |
|---|
| 415 | 535 | int nd_label_active_count(struct nvdimm_drvdata *ndd) |
|---|
| .. | .. |
|---|
| 819 | 939 | victims = 0; |
|---|
| 820 | 940 | if (old_num_resources) { |
|---|
| 821 | 941 | /* convert old local-label-map to dimm-slot victim-map */ |
|---|
| 822 | | - victim_map = kcalloc(BITS_TO_LONGS(nslot), sizeof(long), |
|---|
| 823 | | - GFP_KERNEL); |
|---|
| 942 | + victim_map = bitmap_zalloc(nslot, GFP_KERNEL); |
|---|
| 824 | 943 | if (!victim_map) |
|---|
| 825 | 944 | return -ENOMEM; |
|---|
| 826 | 945 | |
|---|
| .. | .. |
|---|
| 843 | 962 | /* don't allow updates that consume the last label */ |
|---|
| 844 | 963 | if (nfree - alloc < 0 || nfree - alloc + victims < 1) { |
|---|
| 845 | 964 | dev_info(&nsblk->common.dev, "insufficient label space\n"); |
|---|
| 846 | | - kfree(victim_map); |
|---|
| 965 | + bitmap_free(victim_map); |
|---|
| 847 | 966 | return -ENOSPC; |
|---|
| 848 | 967 | } |
|---|
| 849 | 968 | /* from here on we need to abort on error */ |
|---|
| .. | .. |
|---|
| 1026 | 1145 | |
|---|
| 1027 | 1146 | out: |
|---|
| 1028 | 1147 | kfree(old_res_list); |
|---|
| 1029 | | - kfree(victim_map); |
|---|
| 1148 | + bitmap_free(victim_map); |
|---|
| 1030 | 1149 | return rc; |
|---|
| 1031 | 1150 | |
|---|
| 1032 | 1151 | abort: |
|---|