```diff
@@ -56,7 +56,7 @@
 	size_t percpu_alloc_size;
 	size_t histogram_alloc_size;
 	struct dm_stat_percpu *stat_percpu[NR_CPUS];
-	struct dm_stat_shared stat_shared[0];
+	struct dm_stat_shared stat_shared[];
 };
 
 #define STAT_PRECISE_TIMESTAMPS 1
```
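The hunk above, in the `struct dm_stat` definition, swaps the old GCC zero-length-array idiom (`stat_shared[0]`) for a C99 flexible array member (`stat_shared[]`). The layout and `sizeof(struct dm_stat)` are unchanged; the flexible form is standard C and lets bounds-checking tooling (FORTIFY_SOURCE, `-fstrict-flex-arrays`) treat the trailing array as genuinely variable-length. A minimal user-space sketch of the idiom; the `sample_set`/`entries` names are illustrative, not from dm-stats.c:

```c
/*
 * Minimal user-space sketch of the flexible-array-member idiom.
 * The names sample_set/entries are illustrative, not from dm-stats.c.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sample_set {
	size_t n_entries;
	long entries[];		/* C99 flexible array member, replaces "entries[0]" */
};

static struct sample_set *sample_set_alloc(size_t n)
{
	/* one allocation covers the header plus the trailing array */
	struct sample_set *s = malloc(sizeof(*s) + n * sizeof(s->entries[0]));

	if (!s)
		return NULL;
	s->n_entries = n;
	memset(s->entries, 0, n * sizeof(s->entries[0]));
	return s;
}

int main(void)
{
	struct sample_set *s = sample_set_alloc(4);

	if (!s)
		return 1;
	s->entries[3] = 42;
	printf("n=%zu last=%ld\n", s->n_entries, s->entries[3]);
	free(s);
	return 0;
}
```

The element count still has to be carried separately (here `n_entries`), since the flexible member contributes nothing to `sizeof`.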
```diff
@@ -85,7 +85,7 @@
 	a = shared_memory_amount + alloc_size;
 	if (a < shared_memory_amount)
 		return false;
-	if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
+	if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR)
 		return false;
 #ifdef CONFIG_MMU
 	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
```
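This hunk only changes how total RAM is read: newer kernels hide the page counter behind the `totalram_pages()` accessor instead of exporting a plain `totalram_pages` variable, so the caller switches from a variable read to a function call. The check itself is unchanged: detect wraparound of `shared_memory_amount + alloc_size`, then refuse anything above a fixed fraction of RAM (and of vmalloc space on MMU systems). A rough user-space analogue of that pattern, assuming glibc's `_SC_PHYS_PAGES` as a stand-in for `totalram_pages()` and an illustrative `MEMORY_FACTOR`:

```c
/*
 * User-space analogue of the memory cap above: reject the request if the
 * running total wraps around or exceeds a fraction of physical RAM.
 * _SC_PHYS_PAGES (a glibc/Linux extension) stands in for totalram_pages();
 * MEMORY_FACTOR mirrors DM_STATS_MEMORY_FACTOR but the value is illustrative.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>

#define MEMORY_FACTOR 4		/* allow at most 1/4 of RAM */

static bool check_shared_memory(size_t current_amount, size_t alloc_size)
{
	size_t a = current_amount + alloc_size;
	long page_size = sysconf(_SC_PAGESIZE);
	long phys_pages = sysconf(_SC_PHYS_PAGES);

	if (a < current_amount)			/* unsigned sum wrapped around */
		return false;
	if (page_size <= 0 || phys_pages <= 0)
		return false;
	/* compare in pages, like "a >> PAGE_SHIFT > totalram_pages() / FACTOR" */
	return a / (size_t)page_size <= (size_t)phys_pages / MEMORY_FACTOR;
}

int main(void)
{
	printf("1 MiB allowed: %d\n", check_shared_memory(0, 1 << 20));
	printf("wrapping request allowed: %d\n", check_shared_memory(1, (size_t)-1));
	return 0;
}
```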
```diff
@@ -224,6 +224,7 @@
 			       atomic_read(&shared->in_flight[READ]),
 			       atomic_read(&shared->in_flight[WRITE]));
 			}
+			cond_resched();
 		}
 		dm_stat_free(&s->rcu_head);
 	}
```
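The remaining hunks all add `cond_resched()` calls, and they share one motivation: these loops run once per stat entry, a statistics region can cover a huge number of entries, and on a non-preemptible kernel a long uninterrupted loop can stall other work and trigger soft-lockup or RCU-stall warnings. `cond_resched()` is a voluntary preemption point: essentially free when no reschedule is pending, and it yields the CPU when one is. A kernel-style sketch of the pattern (not a standalone program; the loop body is a placeholder):

```c
/*
 * Kernel-style sketch (not a standalone program) of the pattern the patch
 * applies: a potentially very long per-entry loop gains a voluntary
 * preemption point. walk_entries() and the loop body are placeholders.
 */
#include <linux/sched.h>
#include <linux/types.h>

static void walk_entries(size_t n_entries)
{
	size_t ni;

	for (ni = 0; ni < n_entries; ni++) {
		/* ... per-entry work: init, clear, print, aggregate ... */
		cond_resched();	/* cheap if nothing is waiting, yields otherwise */
	}
}
```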
```diff
@@ -262,7 +263,7 @@
 	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
 		return -EOVERFLOW;
 
-	shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
+	shared_alloc_size = struct_size(s, stat_shared, n_entries);
 	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
 		return -EOVERFLOW;
 
```
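`struct_size(s, stat_shared, n_entries)` comes from `<linux/overflow.h>`: it computes `sizeof(*s) + n_entries * sizeof(s->stat_shared[0])` with overflow checking and saturates to `SIZE_MAX` instead of wrapping, so an absurd `n_entries` makes the later allocation fail cleanly rather than come back undersized. The explicit division check on the following line is kept as an extra guard. A user-space sketch of the same idea built on the GCC/Clang `__builtin_*_overflow` helpers; `STRUCT_SIZE` here is a simplified stand-in, not the kernel macro:

```c
/*
 * Simplified, user-space stand-in for the kernel's struct_size():
 * compute sizeof(header) + count * sizeof(element) and saturate to
 * SIZE_MAX on overflow instead of silently wrapping.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct record_set {
	size_t n;
	double records[];	/* flexible array member, as in the first hunk */
};

static size_t struct_size_helper(size_t hdr, size_t elem, size_t count)
{
	size_t bytes;

	if (__builtin_mul_overflow(elem, count, &bytes) ||
	    __builtin_add_overflow(bytes, hdr, &bytes))
		return SIZE_MAX;	/* saturate so a later allocation fails cleanly */
	return bytes;
}

#define STRUCT_SIZE(ptr, member, count) \
	struct_size_helper(sizeof(*(ptr)), sizeof((ptr)->member[0]), (count))

int main(void)
{
	struct record_set *rs = NULL;	/* sizeof() does not dereference */
	size_t ok = STRUCT_SIZE(rs, records, 1000);
	size_t too_big = STRUCT_SIZE(rs, records, SIZE_MAX / 2);

	printf("1000 records -> %zu bytes\n", ok);
	printf("overflow case saturates to SIZE_MAX: %d\n", too_big == SIZE_MAX);
	return 0;
}
```

In the kernel the saturated size simply makes the subsequent allocation fail, which is the desired outcome for a bogus entry count.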
```diff
@@ -313,6 +314,7 @@
 	for (ni = 0; ni < n_entries; ni++) {
 		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
 		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
+		cond_resched();
 	}
 
 	if (s->n_histogram_entries) {
```
```diff
@@ -325,6 +327,7 @@
 		for (ni = 0; ni < n_entries; ni++) {
 			s->stat_shared[ni].tmp.histogram = hi;
 			hi += s->n_histogram_entries + 1;
+			cond_resched();
 		}
 	}
 
```
```diff
@@ -345,6 +348,7 @@
 			for (ni = 0; ni < n_entries; ni++) {
 				p[ni].histogram = hi;
 				hi += s->n_histogram_entries + 1;
+				cond_resched();
 			}
 		}
 	}
```
```diff
@@ -474,6 +478,7 @@
 			}
 			DMEMIT("\n");
 		}
+		cond_resched();
 	}
 	mutex_unlock(&stats->mutex);
 
```
```diff
@@ -750,6 +755,7 @@
 				local_irq_enable();
 			}
 		}
+		cond_resched();
 	}
 }
 
```
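Placement matters in this hunk: `cond_resched()` may actually schedule, which is not allowed with local interrupts disabled, so it sits at the tail of the per-entry loop, after the `local_irq_disable()`/`local_irq_enable()` window in which the per-CPU counters are touched. A kernel-style sketch of that shape (illustrative names, not from dm-stats.c):

```c
/*
 * Kernel-style sketch (illustrative names, not from dm-stats.c) of why the
 * reschedule point sits outside the irq-off region: cond_resched() may
 * schedule, which must not happen with local interrupts disabled.
 */
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/types.h>

static void fold_percpu_counters(size_t n_entries)
{
	size_t ni;

	for (ni = 0; ni < n_entries; ni++) {
		local_irq_disable();
		/* ... update this CPU's counters for entry ni ... */
		local_irq_enable();

		cond_resched();		/* safe: interrupts are enabled again */
	}
}
```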
```diff
@@ -865,6 +871,8 @@
 
 		if (unlikely(sz + 1 >= maxlen))
 			goto buffer_overflow;
+
+		cond_resched();
 	}
 
 	if (clear)
```