hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/md/dm-ioctl.c
@@ -17,6 +17,7 @@
 #include <linux/dm-ioctl.h>
 #include <linux/hdreg.h>
 #include <linux/compat.h>
+#include <linux/nospec.h>
 
 #include <linux/uaccess.h>
 
@@ -572,7 +573,7 @@
 	size_t *needed = needed_param;
 
 	*needed += sizeof(struct dm_target_versions);
-	*needed += strlen(tt->name);
+	*needed += strlen(tt->name) + 1;
 	*needed += ALIGN_MASK;
 }
 
@@ -601,17 +602,27 @@
 	info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1);
 }
 
-static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param_size)
+static int __list_versions(struct dm_ioctl *param, size_t param_size, const char *name)
 {
 	size_t len, needed = 0;
 	struct dm_target_versions *vers;
 	struct vers_iter iter_info;
+	struct target_type *tt = NULL;
+
+	if (name) {
+		tt = dm_get_target_type(name);
+		if (!tt)
+			return -EINVAL;
+	}
 
 	/*
 	 * Loop through all the devices working out how much
 	 * space we need.
 	 */
-	dm_target_iterate(list_version_get_needed, &needed);
+	if (!tt)
+		dm_target_iterate(list_version_get_needed, &needed);
+	else
+		list_version_get_needed(tt, &needed);
 
 	/*
 	 * Grab our output buffer.
@@ -627,16 +638,31 @@
 	iter_info.old_vers = NULL;
 	iter_info.vers = vers;
 	iter_info.flags = 0;
-	iter_info.end = (char *)vers+len;
+	iter_info.end = (char *)vers + needed;
 
 	/*
 	 * Now loop through filling out the names & versions.
 	 */
-	dm_target_iterate(list_version_get_info, &iter_info);
+	if (!tt)
+		dm_target_iterate(list_version_get_info, &iter_info);
+	else
+		list_version_get_info(tt, &iter_info);
 	param->flags |= iter_info.flags;
 
  out:
+	if (tt)
+		dm_put_target_type(tt);
 	return 0;
+}
+
+static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param_size)
+{
+	return __list_versions(param, param_size, NULL);
+}
+
+static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t param_size)
+{
+	return __list_versions(param, param_size, param->name);
 }
 
 static int check_name(const char *name)
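
Note: the __list_versions()/get_target_version() split above backs the new DM_GET_TARGET_VERSION ioctl, which reports the version of a single named target instead of the full list. The userspace sketch below is illustrative only and not part of the change; the control-node path, buffer size and the "linear" target name are assumptions, and it presumes UAPI headers that already define DM_GET_TARGET_VERSION.

/* Illustrative only (not part of this patch): ask the kernel for the version
 * of one target type via the new DM_GET_TARGET_VERSION ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dm-ioctl.h>

int main(void)
{
	union {
		struct dm_ioctl io;	/* request/reply header */
		char buf[16384];	/* header + result payload */
	} u;
	struct dm_ioctl *io = &u.io;
	struct dm_target_versions *v;
	int fd = open("/dev/mapper/control", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&u, 0, sizeof(u));
	io->version[0] = DM_VERSION_MAJOR;
	io->version[1] = DM_VERSION_MINOR;
	io->version[2] = DM_VERSION_PATCHLEVEL;
	io->data_size = sizeof(u);	/* total buffer, header included */
	strncpy(io->name, "linear", sizeof(io->name) - 1);	/* target type to query */

	if (ioctl(fd, DM_GET_TARGET_VERSION, io) < 0 ||
	    (io->flags & DM_BUFFER_FULL_FLAG)) {
		close(fd);
		return 1;
	}

	/* __list_versions() placed one dm_target_versions record (with a
	 * NUL-terminated name, hence the "+ 1" fix above) at data_start. */
	v = (struct dm_target_versions *)((char *)&u + io->data_start);
	printf("%s %u.%u.%u\n", v->name, v->version[0], v->version[1], v->version[2]);
	close(fd);
	return 0;
}
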
@@ -1143,7 +1169,7 @@
 		spec->sector_start = ti->begin;
 		spec->length = ti->len;
 		strncpy(spec->target_type, ti->type->name,
-			sizeof(spec->target_type));
+			sizeof(spec->target_type) - 1);
 
 		outptr += sizeof(struct dm_target_spec);
 		remaining = len - (outptr - outbuf);
@@ -1409,11 +1435,12 @@
 		hc->new_map = NULL;
 	}
 
-	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
-
-	__dev_status(hc->md, param);
 	md = hc->md;
 	up_write(&_hash_lock);
+
+	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+	__dev_status(md, param);
+
 	if (old_map) {
 		dm_sync_table(md);
 		dm_table_destroy(old_map);
@@ -1446,7 +1473,7 @@
 	/*
 	 * Check we have enough space.
 	 */
-	needed = sizeof(*deps) + (sizeof(*deps->dev) * count);
+	needed = struct_size(deps, dev, count);
 	if (len < needed) {
 		param->flags |= DM_BUFFER_FULL_FLAG;
 		return;
@@ -1593,7 +1620,7 @@
 	}
 
 	ti = dm_table_find_target(table, tmsg->sector);
-	if (!dm_target_is_valid(ti)) {
+	if (!ti) {
 		DMWARN("Target message sector outside device.");
 		r = -EINVAL;
 	} else if (ti->type->message)
@@ -1665,11 +1692,13 @@
 		{DM_TARGET_MSG_CMD, 0, target_message},
 		{DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry},
 		{DM_DEV_ARM_POLL, IOCTL_FLAGS_NO_PARAMS, dev_arm_poll},
+		{DM_GET_TARGET_VERSION, 0, get_target_version},
 	};
 
 	if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
 		return NULL;
 
+	cmd = array_index_nospec(cmd, ARRAY_SIZE(_ioctls));
 	*ioctl_flags = _ioctls[cmd].flags;
 	return _ioctls[cmd].fn;
 }
@@ -1819,7 +1848,7 @@
 	int ioctl_flags;
 	int param_flags;
 	unsigned int cmd;
-	struct dm_ioctl *uninitialized_var(param);
+	struct dm_ioctl *param;
 	ioctl_fn fn = NULL;
 	size_t input_param_size;
 	struct dm_ioctl param_kernel;
@@ -2019,3 +2048,110 @@
 
 	return r;
 }
+EXPORT_SYMBOL_GPL(dm_copy_name_and_uuid);
+
+/**
+ * dm_early_create - create a mapped device in early boot.
+ *
+ * @dmi: Contains main information of the device mapping to be created.
+ * @spec_array: array of pointers to struct dm_target_spec. Describes the
+ * mapping table of the device.
+ * @target_params_array: array of strings with the parameters to a specific
+ * target.
+ *
+ * Instead of having the struct dm_target_spec and the parameters for every
+ * target embedded at the end of struct dm_ioctl (as performed in a normal
+ * ioctl), pass them as arguments, so the caller doesn't need to serialize them.
+ * The size of the spec_array and target_params_array is given by
+ * @dmi->target_count.
+ * This function is supposed to be called in early boot, so locking mechanisms
+ * to protect against concurrent loads are not required.
+ */
+int __init dm_early_create(struct dm_ioctl *dmi,
+			   struct dm_target_spec **spec_array,
+			   char **target_params_array)
+{
+	int r, m = DM_ANY_MINOR;
+	struct dm_table *t, *old_map;
+	struct mapped_device *md;
+	unsigned int i;
+
+	if (!dmi->target_count)
+		return -EINVAL;
+
+	r = check_name(dmi->name);
+	if (r)
+		return r;
+
+	if (dmi->flags & DM_PERSISTENT_DEV_FLAG)
+		m = MINOR(huge_decode_dev(dmi->dev));
+
+	/* alloc dm device */
+	r = dm_create(m, &md);
+	if (r)
+		return r;
+
+	/* hash insert */
+	r = dm_hash_insert(dmi->name, *dmi->uuid ? dmi->uuid : NULL, md);
+	if (r)
+		goto err_destroy_dm;
+
+	/* alloc table */
+	r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md);
+	if (r)
+		goto err_hash_remove;
+
+	/* add targets */
+	for (i = 0; i < dmi->target_count; i++) {
+		r = dm_table_add_target(t, spec_array[i]->target_type,
+					(sector_t) spec_array[i]->sector_start,
+					(sector_t) spec_array[i]->length,
+					target_params_array[i]);
+		if (r) {
+			DMWARN("error adding target to table");
+			goto err_destroy_table;
+		}
+	}
+
+	/* finish table */
+	r = dm_table_complete(t);
+	if (r)
+		goto err_destroy_table;
+
+	md->type = dm_table_get_type(t);
+	/* setup md->queue to reflect md's type (may block) */
+	r = dm_setup_md_queue(md, t);
+	if (r) {
+		DMWARN("unable to set up device queue for new table.");
+		goto err_destroy_table;
+	}
+
+	/* Set new map */
+	dm_suspend(md, 0);
+	old_map = dm_swap_table(md, t);
+	if (IS_ERR(old_map)) {
+		r = PTR_ERR(old_map);
+		goto err_destroy_table;
+	}
+	set_disk_ro(dm_disk(md), !!(dmi->flags & DM_READONLY_FLAG));
+
+	/* resume device */
+	r = dm_resume(md);
+	if (r)
+		goto err_destroy_table;
+
+	DMINFO("%s (%s) is ready", md->disk->disk_name, dmi->name);
+	dm_put(md);
+	return 0;
+
+err_destroy_table:
+	dm_table_destroy(t);
+err_hash_remove:
+	(void) __hash_remove(__get_name_cell(dmi->name));
+	/* release reference from __get_name_cell */
+	dm_put(md);
+err_destroy_dm:
+	dm_put(md);
+	dm_destroy(md);
+	return r;
+}
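
For reference, dm_early_create() is meant for in-kernel early-boot callers and takes an already-split table rather than the serialized payload a normal ioctl carries; upstream it backs the dm-mod.create= boot-time table parser. The sketch below is purely illustrative and not part of this patch: the device name, target length and linear parameters are invented, it is not wired into any init path, and it assumes dm_early_create() is declared as in mainline's <linux/device-mapper.h>.

/* Illustrative only (not part of this patch): the argument layout
 * dm_early_create() expects for a single read-only linear target.
 * All names and numbers below are invented for the example. */
#include <linux/device-mapper.h>
#include <linux/dm-ioctl.h>

static int __init example_early_dm(void)
{
	static char linear_params[] = "8:2 0";	/* "<major:minor> <offset>" */
	struct dm_target_spec spec = {
		.sector_start = 0,
		.length = 1048576,		/* 512 MiB, in 512-byte sectors */
	};
	struct dm_target_spec *spec_array[1] = { &spec };
	char *target_params_array[1] = { linear_params };
	struct dm_ioctl dmi = {
		.target_count = 1,
		.flags = DM_READONLY_FLAG,
	};

	strscpy(dmi.name, "vroot", sizeof(dmi.name));
	strscpy(spec.target_type, "linear", sizeof(spec.target_type));

	/* Creates, loads and resumes the device in one call. */
	return dm_early_create(&dmi, spec_array, target_params_array);
}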