2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/drivers/md/dm-stats.c
+++ b/kernel/drivers/md/dm-stats.c
@@ -56,7 +56,7 @@
 	size_t percpu_alloc_size;
 	size_t histogram_alloc_size;
 	struct dm_stat_percpu *stat_percpu[NR_CPUS];
-	struct dm_stat_shared stat_shared[0];
+	struct dm_stat_shared stat_shared[];
 };
 
 #define STAT_PRECISE_TIMESTAMPS 1
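
Note: replacing the GNU zero-length array with a C99 flexible array member is more than cosmetic; `[]` is the standard spelling for a trailing variable-size array, and it lets compiler and kernel array-bounds instrumentation flag out-of-bounds accesses instead of silently permitting them. A minimal userspace sketch of the allocation pattern, with hypothetical demo_* names standing in for dm_stat/dm_stat_shared:

	#include <stdlib.h>
	#include <stddef.h>

	struct demo_shared {
		int in_flight[2];
	};

	struct demo_stat {
		size_t n_entries;
		struct demo_shared stat_shared[];	/* flexible array member */
	};

	/* Allocate the header plus n trailing elements in one block. */
	static struct demo_stat *demo_stat_alloc(size_t n)
	{
		struct demo_stat *s;

		s = calloc(1, sizeof(*s) + n * sizeof(s->stat_shared[0]));
		if (s)
			s->n_entries = n;
		return s;
	}
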
@@ -85,7 +85,7 @@
 	a = shared_memory_amount + alloc_size;
 	if (a < shared_memory_amount)
 		return false;
-	if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
+	if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR)
 		return false;
 #ifdef CONFIG_MMU
 	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
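
Note: totalram_pages used to be a plain global; since v5.0 it is an accessor function (the counter itself became an atomic_long_t), so backports onto newer kernels must add the parentheses. The surrounding check bounds how much memory dm-stats may pin: the sum is first tested for unsigned wrap-around, then the page count is compared against a fixed fraction of RAM. A compile-able userspace analogue, where total_pages and the DEMO_* constants stand in for totalram_pages() and the kernel macros:

	#include <stdbool.h>
	#include <stddef.h>

	#define DEMO_PAGE_SHIFT    12	/* stands in for PAGE_SHIFT */
	#define DEMO_MEMORY_FACTOR 4	/* stands in for DM_STATS_MEMORY_FACTOR */

	static bool demo_check_shared_memory(size_t shared_memory_amount,
					     size_t alloc_size, size_t total_pages)
	{
		size_t a = shared_memory_amount + alloc_size;

		if (a < shared_memory_amount)	/* unsigned wrap: sum overflowed */
			return false;
		/* refuse once dm-stats would hold more than 1/FACTOR of RAM */
		if (a >> DEMO_PAGE_SHIFT > total_pages / DEMO_MEMORY_FACTOR)
			return false;
		return true;
	}
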
@@ -188,7 +188,7 @@
 	       atomic_read(&shared->in_flight[WRITE]);
 }
 
-void dm_stats_init(struct dm_stats *stats)
+int dm_stats_init(struct dm_stats *stats)
 {
 	int cpu;
 	struct dm_stats_last_position *last;
@@ -196,11 +196,16 @@
 	mutex_init(&stats->mutex);
 	INIT_LIST_HEAD(&stats->list);
 	stats->last = alloc_percpu(struct dm_stats_last_position);
+	if (!stats->last)
+		return -ENOMEM;
+
 	for_each_possible_cpu(cpu) {
 		last = per_cpu_ptr(stats->last, cpu);
 		last->last_sector = (sector_t)ULLONG_MAX;
 		last->last_rw = UINT_MAX;
 	}
+
+	return 0;
 }
 
 void dm_stats_cleanup(struct dm_stats *stats)
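
Note: alloc_percpu() can return NULL, and the old void signature gave dm_stats_init() no way to report that, so the NULL per-cpu pointer would be dereferenced later in the for_each_possible_cpu() loop or at use time. Returning int lets callers unwind instead; upstream's alloc_dev() in dm.c checks the result. A self-contained sketch of the new contract, with hypothetical demo_* names:

	#include <errno.h>
	#include <stdlib.h>

	struct demo_stats {
		void *last;	/* stands in for the percpu pointer */
	};

	/* Mirrors the new contract: 0 on success, -ENOMEM on failure. */
	static int demo_stats_init(struct demo_stats *stats)
	{
		stats->last = calloc(1, 64);	/* stands in for alloc_percpu() */
		if (!stats->last)
			return -ENOMEM;
		return 0;
	}

	static int demo_alloc_dev(struct demo_stats *stats)
	{
		int r = demo_stats_init(stats);

		if (r < 0)
			return r;	/* propagate instead of crashing on NULL later */
		return 0;
	}
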
@@ -224,6 +229,7 @@
 				       atomic_read(&shared->in_flight[READ]),
 				       atomic_read(&shared->in_flight[WRITE]));
 			}
+			cond_resched();
 		}
 		dm_stat_free(&s->rcu_head);
 	}
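
Note: a dm-stats region can hold millions of entries, so these per-entry loops may run long enough to trigger soft-lockup or RCU-stall warnings on non-preemptible kernels. cond_resched() adds an explicit scheduling point each iteration and is nearly free when no reschedule is pending; the same one-line change repeats in the hunks below. A userspace analogue of the idea, where sched_yield() every CHUNK iterations stands in for cond_resched() (CHUNK is hypothetical; the kernel calls cond_resched() unconditionally and lets it decide whether to actually reschedule):

	#include <sched.h>
	#include <stddef.h>

	#define CHUNK 1024	/* yield at most once per CHUNK iterations */

	static void demo_zero_entries(int *entries, size_t n)
	{
		for (size_t i = 0; i < n; i++) {
			entries[i] = 0;
			if ((i + 1) % CHUNK == 0)
				sched_yield();	/* give other tasks a chance */
		}
	}
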
@@ -262,7 +268,7 @@
 	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
 		return -EOVERFLOW;
 
-	shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
+	shared_alloc_size = struct_size(s, stat_shared, n_entries);
 	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
 		return -EOVERFLOW;
 
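
Note: struct_size(s, stat_shared, n_entries) from <linux/overflow.h> computes sizeof(*s) plus n_entries trailing flexible-array elements and saturates at SIZE_MAX on overflow, so an oversized count produces a failing allocation rather than an undersized buffer; the open-coded multiply could silently wrap. The follow-up division check is kept as a second line of defense. A simplified userspace sketch of the saturating semantics (the kernel macro works from the struct and member types, not raw sizes):

	#include <stdint.h>
	#include <stddef.h>

	/* Saturating base + elem * count, as struct_size() does. */
	static size_t demo_struct_size(size_t base, size_t elem, size_t count)
	{
		if (count != 0 && elem > (SIZE_MAX - base) / count)
			return SIZE_MAX;	/* overflow: allocation will fail */
		return base + elem * count;
	}

	/* e.g. demo_struct_size(sizeof(struct dm_stat),
	 *                       sizeof(struct dm_stat_shared), n_entries) */
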
@@ -313,6 +319,7 @@
 	for (ni = 0; ni < n_entries; ni++) {
 		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
 		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
+		cond_resched();
 	}
 
 	if (s->n_histogram_entries) {
@@ -325,6 +332,7 @@
 		for (ni = 0; ni < n_entries; ni++) {
 			s->stat_shared[ni].tmp.histogram = hi;
 			hi += s->n_histogram_entries + 1;
+			cond_resched();
 		}
 	}
 
@@ -345,6 +353,7 @@
 			for (ni = 0; ni < n_entries; ni++) {
 				p[ni].histogram = hi;
 				hi += s->n_histogram_entries + 1;
+				cond_resched();
 			}
 		}
 	}
@@ -474,6 +483,7 @@
 			}
 			DMEMIT("\n");
 		}
+		cond_resched();
 	}
 	mutex_unlock(&stats->mutex);
 
@@ -750,6 +760,7 @@
 				local_irq_enable();
 			}
 		}
+		cond_resched();
 	}
 }
 
@@ -865,6 +876,8 @@
 
 		if (unlikely(sz + 1 >= maxlen))
 			goto buffer_overflow;
+
+		cond_resched();
 	}
 
 	if (clear)