.. | ..
355 | 355 | local_set(&bpage->commit, 0);
356 | 356 | }
357 | 357 |
358 | | -/*
359 | | - * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
360 | | - * this issue out.
361 | | - */
| 358 | +static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
| 359 | +{
| 360 | + return local_read(&bpage->page->commit);
| 361 | +}
| 362 | +
362 | 363 | static void free_buffer_page(struct buffer_page *bpage)
363 | 364 | {
364 | 365 | free_page((unsigned long)bpage->page);
.. | ..
526 | 527 | rb_time_t write_stamp;
527 | 528 | rb_time_t before_stamp;
528 | 529 | u64 read_stamp;
| 530 | + /* pages removed since last reset */
| 531 | + unsigned long pages_removed;
529 | 532 | /* ring buffer pages to update, > 0 to add, < 0 to remove */
530 | 533 | long nr_pages_to_update;
531 | 534 | struct list_head new_pages; /* new pages to add */
.. | ..
539 | 542 | unsigned flags;
540 | 543 | int cpus;
541 | 544 | atomic_t record_disabled;
| 545 | + atomic_t resizing;
542 | 546 | cpumask_var_t cpumask;
543 | 547 |
544 | 548 | struct lock_class_key *reader_lock_key;
.. | ..
561 | 565 | struct buffer_page *head_page;
562 | 566 | struct buffer_page *cache_reader_page;
563 | 567 | unsigned long cache_read;
| 568 | + unsigned long cache_pages_removed;
564 | 569 | u64 read_stamp;
565 | 570 | u64 page_stamp;
566 | 571 | struct ring_buffer_event *event;
.. | ..
1004 | 1009 | if (full) {
1005 | 1010 | poll_wait(filp, &work->full_waiters, poll_table);
1006 | 1011 | work->full_waiters_pending = true;
| 1012 | + if (!cpu_buffer->shortest_full ||
| 1013 | + cpu_buffer->shortest_full > full)
| 1014 | + cpu_buffer->shortest_full = full;
1007 | 1015 | } else {
1008 | 1016 | poll_wait(filp, &work->waiters, poll_table);
1009 | 1017 | work->waiters_pending = true;
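The three added lines above track the smallest "full" watermark any poller has asked for, so the writer only has to compare against one threshold when deciding whether to wake full waiters. A minimal userspace sketch of that bookkeeping follows; it is not taken from the kernel, and waiter_state, note_waiter() and should_wake() are made-up names.

/* Sketch: remember the smallest percentage any waiter is blocked on, so the
 * producer only needs to compare the fill level against one value.
 * All names here are hypothetical. */
#include <stdio.h>

struct waiter_state {
	int shortest_full;	/* 0 means "no full waiter registered yet" */
};

static void note_waiter(struct waiter_state *w, int full)
{
	if (!w->shortest_full || w->shortest_full > full)
		w->shortest_full = full;
}

static int should_wake(const struct waiter_state *w, int percent_filled)
{
	return w->shortest_full && percent_filled >= w->shortest_full;
}

int main(void)
{
	struct waiter_state w = { 0 };

	note_waiter(&w, 75);	/* one poller wants 75% of the buffer filled */
	note_waiter(&w, 25);	/* another only needs 25% */
	printf("wake at 30 percent? %d\n", should_wake(&w, 30));	/* 1: the 25% waiter is satisfied */
	return 0;
}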
.. | ..
1450 | 1458 | }
1451 | 1459 |
1452 | 1460 | /**
1453 | | - * rb_check_list - make sure a pointer to a list has the last bits zero
1454 | | - */
1455 | | -static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1456 | | - struct list_head *list)
1457 | | -{
1458 | | - if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1459 | | - return 1;
1460 | | - if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1461 | | - return 1;
1462 | | - return 0;
1463 | | -}
1464 | | -
1465 | | -/**
1466 | 1461 | * rb_check_pages - integrity check of buffer pages
1467 | 1462 | * @cpu_buffer: CPU buffer with pages to test
1468 | 1463 | *
.. | ..
1471 | 1466 | */
1472 | 1467 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1473 | 1468 | {
1474 | | - struct list_head *head = cpu_buffer->pages;
1475 | | - struct buffer_page *bpage, *tmp;
| 1469 | + struct list_head *head = rb_list_head(cpu_buffer->pages);
| 1470 | + struct list_head *tmp;
1476 | 1471 |
1477 | | - /* Reset the head page if it exists */
1478 | | - if (cpu_buffer->head_page)
1479 | | - rb_set_head_page(cpu_buffer);
1480 | | -
1481 | | - rb_head_page_deactivate(cpu_buffer);
1482 | | -
1483 | | - if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1484 | | - return -1;
1485 | | - if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
| 1472 | + if (RB_WARN_ON(cpu_buffer,
| 1473 | + rb_list_head(rb_list_head(head->next)->prev) != head))
1486 | 1474 | return -1;
1487 | 1475 |
1488 | | - if (rb_check_list(cpu_buffer, head))
| 1476 | + if (RB_WARN_ON(cpu_buffer,
| 1477 | + rb_list_head(rb_list_head(head->prev)->next) != head))
1489 | 1478 | return -1;
1490 | 1479 |
1491 | | - list_for_each_entry_safe(bpage, tmp, head, list) {
| 1480 | + for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
1492 | 1481 | if (RB_WARN_ON(cpu_buffer,
1493 | | - bpage->list.next->prev != &bpage->list))
| 1482 | + rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
1494 | 1483 | return -1;
| 1484 | +
1495 | 1485 | if (RB_WARN_ON(cpu_buffer,
1496 | | - bpage->list.prev->next != &bpage->list))
1497 | | - return -1;
1498 | | - if (rb_check_list(cpu_buffer, &bpage->list))
| 1486 | + rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
1499 | 1487 | return -1;
1500 | 1488 | }
1501 | | -
1502 | | - rb_head_page_activate(cpu_buffer);
1503 | 1489 |
1504 | 1490 | return 0;
1505 | 1491 | }
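The rewritten rb_check_pages() no longer deactivates the head page before walking the list; instead every pointer it follows goes through rb_list_head(), which masks off the flag bits the ring buffer keeps in the low bits of ->next, so the consistency check tolerates tagged links. Below is a rough userspace model of that pointer-tagging idea; the node layout, the 2-bit mask and the helper names are assumptions for illustration, not the kernel's definitions. The loop mirrors the patched check: every untagged next/prev pair must point back at the node being examined.

/* Sketch: a circular list whose ->next pointers may carry flag bits in the two
 * low-order bits (possible because the nodes are at least 4-byte aligned).
 * list_untag() plays the role rb_list_head() plays in the patch. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PTR_FLAG_MASK	0x3UL

struct node {
	struct node *next;
	struct node *prev;
	int id;
};

static struct node *list_untag(struct node *p)
{
	return (struct node *)((uintptr_t)p & ~PTR_FLAG_MASK);
}

static struct node *list_tag(struct node *p, unsigned long flag)
{
	return (struct node *)((uintptr_t)p | flag);
}

/* Walk the ring and verify next/prev stay consistent even with tagged links. */
static int check_ring(struct node *head)
{
	struct node *tmp;

	for (tmp = list_untag(head->next); tmp != head; tmp = list_untag(tmp->next)) {
		if (list_untag(list_untag(tmp->next)->prev) != tmp)
			return -1;
		if (list_untag(list_untag(tmp->prev)->next) != tmp)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct node n[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	n[0].next = &n[1]; n[1].next = &n[2]; n[2].next = &n[0];
	n[0].prev = &n[2]; n[1].prev = &n[0]; n[2].prev = &n[1];

	/* Tag one link with a flag bit, as the ring buffer does for its head page. */
	n[2].next = list_tag(&n[0], 0x1);

	assert(check_ring(&n[0]) == 0);
	printf("ring is consistent despite the tagged pointer\n");
	return 0;
}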
.. | ..
1666 | 1652 | struct list_head *head = cpu_buffer->pages;
1667 | 1653 | struct buffer_page *bpage, *tmp;
1668 | 1654 |
| 1655 | + irq_work_sync(&cpu_buffer->irq_work.work);
| 1656 | +
1669 | 1657 | free_buffer_page(cpu_buffer->reader_page);
1670 | 1658 |
1671 | 1659 | if (head) {
.. | ..
1772 | 1760 |
1773 | 1761 | cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1774 | 1762 |
| 1763 | + irq_work_sync(&buffer->irq_work.work);
| 1764 | +
1775 | 1765 | for_each_buffer_cpu(buffer, cpu)
1776 | 1766 | rb_free_cpu_buffer(buffer->buffers[cpu]);
1777 | 1767 |
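Both teardown paths now call irq_work_sync() before the structures that embed the irq_work are freed, so a wakeup that is still queued or executing cannot dereference freed memory. A simplified userspace analogy of that "finish the asynchronous callback before freeing its container" rule, using a plain thread in place of irq_work (all names are illustrative):

/* Sketch: never free an object while an asynchronous callback that uses it
 * may still be running; wait for it first.  Compile with -pthread. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work_obj {
	pthread_t worker;
	int wakeups;
};

static void *wakeup_fn(void *arg)
{
	struct work_obj *obj = arg;	/* would be a use-after-free if freed early */

	obj->wakeups++;
	return NULL;
}

int main(void)
{
	struct work_obj *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return 1;
	pthread_create(&obj->worker, NULL, wakeup_fn, obj);

	/* Analogue of irq_work_sync(): wait for the in-flight callback ... */
	pthread_join(obj->worker, NULL);

	/* ... and only then release the memory that embeds it. */
	printf("wakeups seen before free: %d\n", obj->wakeups);
	free(obj);
	return 0;
}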
.. | ..
1851 | 1841 | to_remove = rb_list_head(to_remove)->next;
1852 | 1842 | head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1853 | 1843 | }
| 1844 | + /* Read iterators need to reset themselves when some pages removed */
| 1845 | + cpu_buffer->pages_removed += nr_removed;
1854 | 1846 |
1855 | 1847 | next_page = rb_list_head(to_remove)->next;
1856 | 1848 |
.. | ..
1871 | 1863 | if (head_bit)
1872 | 1864 | cpu_buffer->head_page = list_entry(next_page,
1873 | 1865 | struct buffer_page, list);
1874 | | -
1875 | | - /*
1876 | | - * change read pointer to make sure any read iterators reset
1877 | | - * themselves
1878 | | - */
1879 | | - cpu_buffer->read = 0;
1880 | 1866 |
1881 | 1867 | /* pages are removed, resume tracing and then free the pages */
1882 | 1868 | atomic_dec(&cpu_buffer->record_disabled);
.. | ..
1905 | 1891 | * Increment overrun to account for the lost events.
1906 | 1892 | */
1907 | 1893 | local_add(page_entries, &cpu_buffer->overrun);
1908 | | - local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
| 1894 | + local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
1909 | 1895 | local_inc(&cpu_buffer->pages_lost);
1910 | 1896 |
1911 | 1897 | }
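Here, and in the tail-move path further down, the patch subtracts the bytes actually committed on the discarded page (rb_page_commit()) rather than the fixed BUF_PAGE_SIZE, because a partially filled page would otherwise pull entries_bytes below the real number of unconsumed bytes. A tiny arithmetic sketch of the difference; the 4080-byte payload size and the variable names are only illustrative:

/* Sketch: why the byte counter must drop by the page's commit, not by the
 * whole page size, when a partially filled page is discarded. */
#include <stdio.h>

#define BUF_PAGE_SIZE	4080L	/* illustrative payload size of one buffer page */

int main(void)
{
	long entries_bytes = 0;
	long commit_on_lost_page = 1200;	/* bytes actually committed on that page */

	entries_bytes += commit_on_lost_page;	/* the writer accounted the committed bytes */

	/* old accounting: drop a whole page's worth, overshooting by 2880 bytes */
	long old_result = entries_bytes - BUF_PAGE_SIZE;

	/* fixed accounting: drop exactly what that page held */
	long new_result = entries_bytes - commit_on_lost_page;

	printf("old: %ld (underflows), new: %ld\n", old_result, new_result);
	return 0;
}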
.. | ..
2060 | 2046 |
2061 | 2047 | /* prevent another thread from changing buffer sizes */
2062 | 2048 | mutex_lock(&buffer->mutex);
2063 | | -
| 2049 | + atomic_inc(&buffer->resizing);
2064 | 2050 |
2065 | 2051 | if (cpu_id == RING_BUFFER_ALL_CPUS) {
2066 | 2052 | /*
.. | ..
2098 | 2084 | err = -ENOMEM;
2099 | 2085 | goto out_err;
2100 | 2086 | }
| 2087 | +
| 2088 | + cond_resched();
2101 | 2089 | }
2102 | 2090 |
2103 | 2091 | get_online_cpus();
.. | ..
2203 | 2191 | atomic_dec(&buffer->record_disabled);
2204 | 2192 | }
2205 | 2193 |
| 2194 | + atomic_dec(&buffer->resizing);
2206 | 2195 | mutex_unlock(&buffer->mutex);
2207 | 2196 | return 0;
2208 | 2197 |
.. | ..
2223 | 2212 | }
2224 | 2213 | }
2225 | 2214 | out_err_unlock:
| 2215 | + atomic_dec(&buffer->resizing);
2226 | 2216 | mutex_unlock(&buffer->mutex);
2227 | 2217 | return err;
2228 | 2218 | }
.. | ..
2251 | 2241 | cpu_buffer->reader_page->read);
2252 | 2242 | }
2253 | 2243 |
2254 | | -static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
2255 | | -{
2256 | | - return local_read(&bpage->page->commit);
2257 | | -}
2258 | | -
2259 | 2244 | static struct ring_buffer_event *
2260 | 2245 | rb_iter_head_event(struct ring_buffer_iter *iter)
2261 | 2246 | {
.. | ..
2274 | 2259 | */
2275 | 2260 | commit = rb_page_commit(iter_head_page);
2276 | 2261 | smp_rmb();
| 2262 | +
| 2263 | + /* An event needs to be at least 8 bytes in size */
| 2264 | + if (iter->head > commit - 8)
| 2265 | + goto reset;
| 2266 | +
2277 | 2267 | event = __rb_page_index(iter_head_page, iter->head);
2278 | 2268 | length = rb_event_length(event);
2279 | 2269 |
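The added guard relies on the minimum event size stated in the patch comment: an event occupies at least 8 bytes, so if fewer than 8 bytes remain between iter->head and the commit index there cannot be a complete event and the iterator resets instead of reading past the committed region. A small sketch of the equivalent bound applied to a plain byte range; count_events() is a made-up helper and the fixed-size decoding is a simplification:

/* Sketch: refuse to decode an event when fewer than the minimum event size
 * remains before the commit index. */
#include <stdio.h>

#define MIN_EVENT_SIZE	8U

static int count_events(unsigned int head, unsigned int commit)
{
	int events = 0;

	while (head + MIN_EVENT_SIZE <= commit) {	/* equivalent of the patched check */
		/* a real decoder would read the event header at 'head' here */
		head += MIN_EVENT_SIZE;			/* pretend fixed-size events */
		events++;
	}
	return events;
}

int main(void)
{
	printf("%d events fit in 20 committed bytes\n", count_events(0, 20));
	printf("%d events when only 5 bytes are committed\n", count_events(0, 5));
	return 0;
}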
.. | ..
2396 | 2386 | * the counters.
2397 | 2387 | */
2398 | 2388 | local_add(entries, &cpu_buffer->overrun);
2399 | | - local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
| 2389 | + local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
2400 | 2390 | local_inc(&cpu_buffer->pages_lost);
2401 | 2391 |
2402 | 2392 | /*
.. | ..
2539 | 2529 |
2540 | 2530 | event = __rb_page_index(tail_page, tail);
2541 | 2531 |
2542 | | - /* account for padding bytes */
2543 | | - local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2544 | | -
2545 | 2532 | /*
2546 | 2533 | * Save the original length to the meta data.
2547 | 2534 | * This will be used by the reader to add lost event
.. | ..
2555 | 2542 | * write counter enough to allow another writer to slip
2556 | 2543 | * in on this page.
2557 | 2544 | * We put in a discarded commit instead, to make sure
2558 | | - * that this space is not used again.
| 2545 | + * that this space is not used again, and this space will
| 2546 | + * not be accounted into 'entries_bytes'.
2559 | 2547 | *
2560 | 2548 | * If we are less than the minimum size, we don't need to
2561 | 2549 | * worry about it.
.. | ..
2579 | 2567 | event->type_len = RINGBUF_TYPE_PADDING;
2580 | 2568 | /* time delta must be non zero */
2581 | 2569 | event->time_delta = 1;
| 2570 | +
| 2571 | + /* account for padding bytes */
| 2572 | + local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2582 | 2573 |
2583 | 2574 | /* Make sure the padding is visible before the tail_page->write update */
2584 | 2575 | smp_wmb();
.. | ..
2984 | 2975 | if (RB_WARN_ON(cpu_buffer,
2985 | 2976 | rb_is_reader_page(cpu_buffer->tail_page)))
2986 | 2977 | return;
| 2978 | + /*
| 2979 | + * No need for a memory barrier here, as the update
| 2980 | + * of the tail_page did it for this page.
| 2981 | + */
2987 | 2982 | local_set(&cpu_buffer->commit_page->page->commit,
2988 | 2983 | rb_page_write(cpu_buffer->commit_page));
2989 | 2984 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
.. | ..
2993 | 2988 | while (rb_commit_index(cpu_buffer) !=
2994 | 2989 | rb_page_write(cpu_buffer->commit_page)) {
2995 | 2990 |
| 2991 | + /* Make sure the readers see the content of what is committed. */
| 2992 | + smp_wmb();
2996 | 2993 | local_set(&cpu_buffer->commit_page->page->commit,
2997 | 2994 | rb_page_write(cpu_buffer->commit_page));
2998 | 2995 | RB_WARN_ON(cpu_buffer,
.. | ..
3939 | 3936 | EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3940 | 3937 |
3941 | 3938 | /**
3942 | | - * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
| 3939 | + * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
3943 | 3940 | * @buffer: The ring buffer
3944 | 3941 | * @cpu: The per CPU buffer to read from.
3945 | 3942 | */
.. | ..
4117 | 4114 |
4118 | 4115 | iter->cache_reader_page = iter->head_page;
4119 | 4116 | iter->cache_read = cpu_buffer->read;
| 4117 | + iter->cache_pages_removed = cpu_buffer->pages_removed;
4120 | 4118 |
4121 | 4119 | if (iter->head) {
4122 | 4120 | iter->read_stamp = cpu_buffer->read_stamp;
.. | ..
4412 | 4410 |
4413 | 4411 | /*
4414 | 4412 | * Make sure we see any padding after the write update
4415 | | - * (see rb_reset_tail())
| 4413 | + * (see rb_reset_tail()).
| 4414 | + *
| 4415 | + * In addition, a writer may be writing on the reader page
| 4416 | + * if the page has not been fully filled, so the read barrier
| 4417 | + * is also needed to make sure we see the content of what is
| 4418 | + * committed by the writer (see rb_set_commit_to_write()).
4416 | 4419 | */
4417 | 4420 | smp_rmb();
4418 | 4421 |
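The writer-side smp_wmb() added at 2991/2992 above pairs with this reader-side smp_rmb(): the writer orders the event data before the commit-index update, and the reader orders the commit-index load before reading the data. A compact C11 model of that publish/consume pairing, using userspace fences in place of the kernel's smp_* macros and an invented two-byte payload (compile with -pthread):

/* Sketch: write data, fence, then publish the commit index; the reader loads
 * the index, fences, then reads the data. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static char page[64];
static atomic_uint commit;	/* how many bytes of 'page' are published */

static void *writer(void *arg)
{
	(void)arg;
	page[0] = 'A';
	page[1] = 'B';
	/* pairs with the acquire fence in reader(): data is ordered before the index */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&commit, 2, memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	unsigned int c;

	(void)arg;
	while ((c = atomic_load_explicit(&commit, memory_order_relaxed)) == 0)
		;	/* wait for the writer to publish something */
	/* pairs with the release fence in writer(): the index is ordered before the data */
	atomic_thread_fence(memory_order_acquire);
	printf("saw %u committed bytes: %c%c\n", c, page[0], page[1]);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}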
.. | ..
4441 | 4444 |
4442 | 4445 | length = rb_event_length(event);
4443 | 4446 | cpu_buffer->reader_page->read += length;
| 4447 | + cpu_buffer->read_bytes += length;
4444 | 4448 | }
4445 | 4449 |
4446 | 4450 | static void rb_advance_iter(struct ring_buffer_iter *iter)
.. | ..
4565 | 4569 | buffer = cpu_buffer->buffer;
4566 | 4570 |
4567 | 4571 | /*
4568 | | - * Check if someone performed a consuming read to
4569 | | - * the buffer. A consuming read invalidates the iterator
4570 | | - * and we need to reset the iterator in this case.
| 4572 | + * Check if someone performed a consuming read to the buffer
| 4573 | + * or removed some pages from the buffer. In these cases,
| 4574 | + * iterator was invalidated and we need to reset it.
4571 | 4575 | */
4572 | 4576 | if (unlikely(iter->cache_read != cpu_buffer->read ||
4573 | | - iter->cache_reader_page != cpu_buffer->reader_page))
| 4577 | + iter->cache_reader_page != cpu_buffer->reader_page ||
| 4578 | + iter->cache_pages_removed != cpu_buffer->pages_removed))
4574 | 4579 | rb_iter_reset(iter);
4575 | 4580 |
4576 | 4581 | again:
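With pages_removed cached in the iterator (hunk at 4117 above), a non-consuming iterator can now detect that a resize removed pages, not only that a consuming read happened, and reset itself before touching pages that may have been freed. A sketch of that "cache the state you were built against, reset on any mismatch" pattern; the structures and function names are illustrative, not the kernel's:

/* Sketch: an iterator caches the state it was built against and resets itself
 * whenever any of those values has moved on underneath it. */
#include <stdbool.h>
#include <stdio.h>

struct buf_state {
	unsigned long read;		/* consuming reads so far */
	unsigned long pages_removed;	/* pages dropped by resize since last reset */
	const void *reader_page;	/* current reader page */
};

struct buf_iter {
	unsigned long cache_read;
	unsigned long cache_pages_removed;
	const void *cache_reader_page;
};

static void iter_reset(struct buf_iter *it, const struct buf_state *s)
{
	it->cache_read = s->read;
	it->cache_pages_removed = s->pages_removed;
	it->cache_reader_page = s->reader_page;
}

static bool iter_still_valid(const struct buf_iter *it, const struct buf_state *s)
{
	return it->cache_read == s->read &&
	       it->cache_reader_page == s->reader_page &&
	       it->cache_pages_removed == s->pages_removed;
}

int main(void)
{
	static const char page0[1];
	struct buf_state s = { .read = 0, .pages_removed = 0, .reader_page = page0 };
	struct buf_iter it;

	iter_reset(&it, &s);
	s.pages_removed++;		/* a resize removed some pages */
	if (!iter_still_valid(&it, &s)) {
		printf("iterator invalidated by page removal, resetting\n");
		iter_reset(&it, &s);
	}
	return 0;
}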
.. | ..
4961 | 4966 | }
4962 | 4967 | EXPORT_SYMBOL_GPL(ring_buffer_size);
4963 | 4968 |
| 4969 | +static void rb_clear_buffer_page(struct buffer_page *page)
| 4970 | +{
| 4971 | + local_set(&page->write, 0);
| 4972 | + local_set(&page->entries, 0);
| 4973 | + rb_init_page(page->page);
| 4974 | + page->read = 0;
| 4975 | +}
| 4976 | +
4964 | 4977 | static void
4965 | 4978 | rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4966 | 4979 | {
| 4980 | + struct buffer_page *page;
| 4981 | +
4967 | 4982 | rb_head_page_deactivate(cpu_buffer);
4968 | 4983 |
4969 | 4984 | cpu_buffer->head_page
4970 | 4985 | = list_entry(cpu_buffer->pages, struct buffer_page, list);
4971 | | - local_set(&cpu_buffer->head_page->write, 0);
4972 | | - local_set(&cpu_buffer->head_page->entries, 0);
4973 | | - local_set(&cpu_buffer->head_page->page->commit, 0);
4974 | | -
4975 | | - cpu_buffer->head_page->read = 0;
| 4986 | + rb_clear_buffer_page(cpu_buffer->head_page);
| 4987 | + list_for_each_entry(page, cpu_buffer->pages, list) {
| 4988 | + rb_clear_buffer_page(page);
| 4989 | + }
4976 | 4990 |
4977 | 4991 | cpu_buffer->tail_page = cpu_buffer->head_page;
4978 | 4992 | cpu_buffer->commit_page = cpu_buffer->head_page;
4979 | 4993 |
4980 | 4994 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4981 | 4995 | INIT_LIST_HEAD(&cpu_buffer->new_pages);
4982 | | - local_set(&cpu_buffer->reader_page->write, 0);
4983 | | - local_set(&cpu_buffer->reader_page->entries, 0);
4984 | | - local_set(&cpu_buffer->reader_page->page->commit, 0);
4985 | | - cpu_buffer->reader_page->read = 0;
| 4996 | + rb_clear_buffer_page(cpu_buffer->reader_page);
4986 | 4997 |
4987 | 4998 | local_set(&cpu_buffer->entries_bytes, 0);
4988 | 4999 | local_set(&cpu_buffer->overrun, 0);
.. | ..
5006 | 5017 | cpu_buffer->last_overrun = 0;
5007 | 5018 |
5008 | 5019 | rb_head_page_activate(cpu_buffer);
| 5020 | + cpu_buffer->pages_removed = 0;
5009 | 5021 | }
5010 | 5022 |
5011 | 5023 | /* Must have disabled the cpu buffer then done a synchronize_rcu */
.. | ..
5058 | 5070 | }
5059 | 5071 | EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
5060 | 5072 |
| 5073 | +/* Flag to ensure proper resetting of atomic variables */
| 5074 | +#define RESET_BIT (1 << 30)
| 5075 | +
5061 | 5076 | /**
5062 | 5077 | * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5063 | 5078 | * @buffer: The ring buffer to reset a per cpu buffer of
.. | ..
5074 | 5089 | for_each_online_buffer_cpu(buffer, cpu) {
5075 | 5090 | cpu_buffer = buffer->buffers[cpu];
5076 | 5091 |
5077 | | - atomic_inc(&cpu_buffer->resize_disabled);
| 5092 | + atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
5078 | 5093 | atomic_inc(&cpu_buffer->record_disabled);
5079 | 5094 | }
5080 | 5095 |
5081 | 5096 | /* Make sure all commits have finished */
5082 | 5097 | synchronize_rcu();
5083 | 5098 |
5084 | | - for_each_online_buffer_cpu(buffer, cpu) {
| 5099 | + for_each_buffer_cpu(buffer, cpu) {
5085 | 5100 | cpu_buffer = buffer->buffers[cpu];
| 5101 | +
| 5102 | + /*
| 5103 | + * If a CPU came online during the synchronize_rcu(), then
| 5104 | + * ignore it.
| 5105 | + */
| 5106 | + if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
| 5107 | + continue;
5086 | 5108 |
5087 | 5109 | reset_disabled_cpu_buffer(cpu_buffer);
5088 | 5110 |
5089 | 5111 | atomic_dec(&cpu_buffer->record_disabled);
5090 | | - atomic_dec(&cpu_buffer->resize_disabled);
| 5112 | + atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
5091 | 5113 | }
5092 | 5114 |
5093 | 5115 | mutex_unlock(&buffer->mutex);
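The reset path reuses the existing resize_disabled atomic as both a counter and a marker: the first loop adds RESET_BIT only to buffers whose CPU was online at that moment, and after synchronize_rcu() the second loop visits every possible CPU but skips any buffer whose counter lacks the bit, i.e. one whose CPU came online in between. A C11 sketch of carrying a flag in a high bit of an atomic counter this way; the buffer count and names are illustrative:

/* Sketch: use a high bit of an atomic counter as an "operation in progress"
 * mark, so a later pass can tell which entries the first pass touched. */
#include <stdatomic.h>
#include <stdio.h>

#define NBUF		4
#define RESET_BIT	(1 << 30)

static atomic_int resize_disabled[NBUF];

int main(void)
{
	int i;

	/* first pass: only buffers 0 and 1 are "online", so only they get marked */
	for (i = 0; i < 2; i++)
		atomic_fetch_add(&resize_disabled[i], RESET_BIT);

	/* buffer 2 "comes online" here and takes a plain resize-disable reference */
	atomic_fetch_add(&resize_disabled[2], 1);

	/* second pass: walk everything, but reset only the marked buffers */
	for (i = 0; i < NBUF; i++) {
		if (!(atomic_load(&resize_disabled[i]) & RESET_BIT)) {
			printf("buffer %d: not marked, skipped\n", i);
			continue;
		}
		printf("buffer %d: reset\n", i);
		atomic_fetch_sub(&resize_disabled[i], RESET_BIT);
	}
	return 0;
}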
.. | ..
5242 | 5264 | if (local_read(&cpu_buffer_b->committing))
5243 | 5265 | goto out_dec;
5244 | 5266 |
| 5267 | + /*
| 5268 | + * When resize is in progress, we cannot swap it because
| 5269 | + * it will mess the state of the cpu buffer.
| 5270 | + */
| 5271 | + if (atomic_read(&buffer_a->resizing))
| 5272 | + goto out_dec;
| 5273 | + if (atomic_read(&buffer_b->resizing))
| 5274 | + goto out_dec;
| 5275 | +
5245 | 5276 | buffer_a->buffers[cpu] = cpu_buffer_b;
5246 | 5277 | buffer_b->buffers[cpu] = cpu_buffer_a;
5247 | 5278 |
.. | ..
5324 | 5355 | */
5325 | 5356 | void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
5326 | 5357 | {
5327 | | - struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
| 5358 | + struct ring_buffer_per_cpu *cpu_buffer;
5328 | 5359 | struct buffer_data_page *bpage = data;
5329 | 5360 | struct page *page = virt_to_page(bpage);
5330 | 5361 | unsigned long flags;
| 5362 | +
| 5363 | + if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
| 5364 | + return;
| 5365 | +
| 5366 | + cpu_buffer = buffer->buffers[cpu];
5331 | 5367 |
5332 | 5368 | /* If the page is still in use someplace else, we can't reuse it */
5333 | 5369 | if (page_ref_count(page) > 1)
.. | ..
5500 | 5536 | } else {
5501 | 5537 | /* update the entry counter */
5502 | 5538 | cpu_buffer->read += rb_page_entries(reader);
5503 | | - cpu_buffer->read_bytes += BUF_PAGE_SIZE;
| 5539 | + cpu_buffer->read_bytes += rb_page_commit(reader);
5504 | 5540 |
5505 | 5541 | /* swap the pages */
5506 | 5542 | rb_init_page(bpage);
---|