...
 };

 /* Convert the perf index to an offset within the ETR buffer */
-#define PERF_IDX2OFF(idx, buf)	((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+#define PERF_IDX2OFF(idx, buf)	\
+		((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))

 /* Lower limit for ETR hardware buffer */
 #define TMC_ETR_PERF_MIN_BUF_SIZE	SZ_1M
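Note: `nr_pages` is a 32-bit field of the perf buffer structure, so the old macro performed the `<< PAGE_SHIFT` in 32-bit arithmetic; with 4 KiB pages the intermediate stops fitting in 32 bits once the buffer reaches a few GiB, and the computed offset wraps. Casting to `unsigned long` first widens the shift on 64-bit kernels. A minimal userspace sketch of the failure mode (hypothetical values, not driver code):

```c
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int nr_pages = 1U << 20;	/* 4 GiB worth of 4 KiB pages */
	unsigned long idx = 12345;

	/*
	 * The unwidened form wraps the 32-bit shift to 0, so the modulo
	 * would divide by zero:  idx % (nr_pages << PAGE_SHIFT)
	 */
	unsigned long off = idx % ((unsigned long)nr_pages << PAGE_SHIFT);

	printf("offset = %lu\n", off);	/* 12345, as expected (64-bit build) */
	return 0;
}
```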
...
 			   struct device *dev, enum dma_data_direction dir)
 {
 	int i;
+	struct device *real_dev = dev->parent;

 	for (i = 0; i < tmc_pages->nr_pages; i++) {
 		if (tmc_pages->daddrs && tmc_pages->daddrs[i])
-			dma_unmap_page(dev, tmc_pages->daddrs[i],
+			dma_unmap_page(real_dev, tmc_pages->daddrs[i],
 				       PAGE_SIZE, dir);
 		if (tmc_pages->pages && tmc_pages->pages[i])
 			__free_page(tmc_pages->pages[i]);
...
 	int i, nr_pages;
 	dma_addr_t paddr;
 	struct page *page;
+	struct device *real_dev = dev->parent;

 	nr_pages = tmc_pages->nr_pages;
 	tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
...
 			if (!page)
 				goto err;
 		}
-		paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
-		if (dma_mapping_error(dev, paddr))
+		paddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, dir);
+		if (dma_mapping_error(real_dev, paddr))
 			goto err;
 		tmc_pages->daddrs[i] = paddr;
 		tmc_pages->pages[i] = page;
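Note: the recurring `real_dev = dev->parent` pattern reflects that the `struct device` embedded in a coresight_device is a virtual device with no DMA configuration of its own; mapping, syncing and freeing must go through the parent AMBA/platform device, which carries the dma_ops and DMA masks. A minimal sketch of the pattern (the helper name is hypothetical, not driver code):

```c
#include <linux/dma-mapping.h>

/*
 * Sketch only: DMA-map a page against the physical parent device
 * rather than the virtual coresight device.
 */
static dma_addr_t tmc_map_trace_page(struct device *csdev_dev,
				     struct page *page)
{
	struct device *real_dev = csdev_dev->parent;	/* AMBA device */
	dma_addr_t daddr;

	daddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(real_dev, daddr))
		return DMA_MAPPING_ERROR;
	return daddr;
}
```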
...
 	tmc_free_table_pages(sg_table);
 	tmc_free_data_pages(sg_table);
 }
+EXPORT_SYMBOL_GPL(tmc_free_sg_table);

 /*
  * Alloc pages for the table. Since this will be used by the device,
...
  * and data buffers. TMC writes to the data buffers and reads from the SG
  * Table pages.
  *
- * @dev		- Device to which page should be DMA mapped.
+ * @dev		- Coresight device to which page should be DMA mapped.
  * @node	- Numa node for mem allocations
  * @nr_tpages	- Number of pages for the table entries.
  * @nr_dpages	- Number of pages for Data buffer.
...

 	return sg_table;
 }
+EXPORT_SYMBOL_GPL(tmc_alloc_sg_table);

 /*
  * tmc_sg_table_sync_data_range: Sync the data buffer written
...
 {
 	int i, index, start;
 	int npages = DIV_ROUND_UP(size, PAGE_SIZE);
-	struct device *dev = table->dev;
+	struct device *real_dev = table->dev->parent;
 	struct tmc_pages *data = &table->data_pages;

 	start = offset >> PAGE_SHIFT;
 	for (i = start; i < (start + npages); i++) {
 		index = i % data->nr_pages;
-		dma_sync_single_for_cpu(dev, data->daddrs[index],
+		dma_sync_single_for_cpu(real_dev, data->daddrs[index],
 					PAGE_SIZE, DMA_FROM_DEVICE);
 	}
 }
+EXPORT_SYMBOL_GPL(tmc_sg_table_sync_data_range);

 /* tmc_sg_sync_table: Sync the page table */
 void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
 {
 	int i;
-	struct device *dev = sg_table->dev;
+	struct device *real_dev = sg_table->dev->parent;
 	struct tmc_pages *table_pages = &sg_table->table_pages;

 	for (i = 0; i < table_pages->nr_pages; i++)
-		dma_sync_single_for_device(dev, table_pages->daddrs[i],
+		dma_sync_single_for_device(real_dev, table_pages->daddrs[i],
 					   PAGE_SIZE, DMA_TO_DEVICE);
 }
+EXPORT_SYMBOL_GPL(tmc_sg_table_sync_table);
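Note: the two sync directions mirror the data flow. `dma_sync_single_for_cpu(..., DMA_FROM_DEVICE)` hands trace data written by the ETR back to the CPU before it is read, while `dma_sync_single_for_device(..., DMA_TO_DEVICE)` publishes the CPU-written SG table entries to the device before the ETR fetches them.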
375 | 382 | |
---|
376 | 383 | /* |
---|
377 | 384 | * tmc_sg_table_get_data: Get the buffer pointer for data @offset |
---|
.. | .. |
---|
401 | 408 | *bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset; |
---|
402 | 409 | return len; |
---|
403 | 410 | } |
---|
| 411 | +EXPORT_SYMBOL_GPL(tmc_sg_table_get_data); |
---|
404 | 412 | |
---|
405 | 413 | #ifdef ETR_SG_DEBUG |
---|
406 | 414 | /* Map a dma address to virtual address */ |
---|
.. | .. |
---|
592 | 600 | void **pages) |
---|
593 | 601 | { |
---|
594 | 602 | struct etr_flat_buf *flat_buf; |
---|
| 603 | + struct device *real_dev = drvdata->csdev->dev.parent; |
---|
595 | 604 | |
---|
596 | 605 | /* We cannot reuse existing pages for flat buf */ |
---|
597 | 606 | if (pages) |
---|
.. | .. |
---|
601 | 610 | if (!flat_buf) |
---|
602 | 611 | return -ENOMEM; |
---|
603 | 612 | |
---|
604 | | - flat_buf->vaddr = dma_alloc_coherent(drvdata->dev, etr_buf->size, |
---|
605 | | - &flat_buf->daddr, GFP_KERNEL); |
---|
| 613 | + flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size, |
---|
| 614 | + &flat_buf->daddr, |
---|
| 615 | + DMA_FROM_DEVICE, GFP_KERNEL); |
---|
606 | 616 | if (!flat_buf->vaddr) { |
---|
607 | 617 | kfree(flat_buf); |
---|
608 | 618 | return -ENOMEM; |
---|
609 | 619 | } |
---|
610 | 620 | |
---|
611 | 621 | flat_buf->size = etr_buf->size; |
---|
612 | | - flat_buf->dev = drvdata->dev; |
---|
| 622 | + flat_buf->dev = &drvdata->csdev->dev; |
---|
613 | 623 | etr_buf->hwaddr = flat_buf->daddr; |
---|
614 | 624 | etr_buf->mode = ETR_MODE_FLAT; |
---|
615 | 625 | etr_buf->private = flat_buf; |
---|
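Note: on systems without coherent DMA, `dma_alloc_coherent()` typically returns uncached memory, which makes CPU reads of a large trace buffer painfully slow. `dma_alloc_noncoherent()` returns cacheable memory the device can still DMA into, at the price of explicit ownership transfers with `dma_sync_single_for_cpu()` (see the sync hunk below). A hedged sketch of the resulting buffer lifecycle, assuming `real_dev` is the DMA-capable parent device:

```c
#include <linux/dma-mapping.h>

static void *alloc_trace_buf(struct device *real_dev, size_t size,
			     dma_addr_t *daddr)
{
	/* cacheable buffer; the CPU must sync before reading trace data */
	return dma_alloc_noncoherent(real_dev, size, daddr,
				     DMA_FROM_DEVICE, GFP_KERNEL);
}

static void read_trace(struct device *real_dev, dma_addr_t daddr, size_t len)
{
	/* hand the buffer back to the CPU before parsing it */
	dma_sync_single_for_cpu(real_dev, daddr, len, DMA_FROM_DEVICE);
	/* ... consume trace data ... */
}

static void free_trace_buf(struct device *real_dev, size_t size,
			   void *vaddr, dma_addr_t daddr)
{
	dma_free_noncoherent(real_dev, size, vaddr, daddr, DMA_FROM_DEVICE);
}
```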
...
 {
 	struct etr_flat_buf *flat_buf = etr_buf->private;

-	if (flat_buf && flat_buf->daddr)
-		dma_free_coherent(flat_buf->dev, flat_buf->size,
-				  flat_buf->vaddr, flat_buf->daddr);
+	if (flat_buf && flat_buf->daddr) {
+		struct device *real_dev = flat_buf->dev->parent;
+
+		dma_free_noncoherent(real_dev, etr_buf->size,
+				     flat_buf->vaddr, flat_buf->daddr,
+				     DMA_FROM_DEVICE);
+	}
 	kfree(flat_buf);
 }

 static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
 {
+	struct etr_flat_buf *flat_buf = etr_buf->private;
+	struct device *real_dev = flat_buf->dev->parent;
+
 	/*
 	 * Adjust the buffer to point to the beginning of the trace data
 	 * and update the available trace data.
...
 		etr_buf->len = etr_buf->size;
 	else
 		etr_buf->len = rwp - rrp;
+
+	/*
+	 * The driver always starts tracing at the beginning of the buffer,
+	 * the only reason why we would get a wrap around is when the buffer
+	 * is full. Sync the entire buffer in one go for this case.
+	 */
+	if (etr_buf->offset + etr_buf->len > etr_buf->size)
+		dma_sync_single_for_cpu(real_dev, flat_buf->daddr,
+					etr_buf->size, DMA_FROM_DEVICE);
+	else
+		dma_sync_single_for_cpu(real_dev,
+					flat_buf->daddr + etr_buf->offset,
+					etr_buf->len, DMA_FROM_DEVICE);
 }
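Note, with hypothetical numbers: for a 1 MiB flat buffer (size = 0x100000) that filled up and wrapped, the sync may see offset = 0xf0000 and len = 0x100000, so offset + len = 0x1f0000 exceeds size and the whole buffer is synced in one call; in the non-wrapped case only the [offset, offset + len) window is handed back to the CPU.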

 static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
...
 			       void **pages)
 {
 	struct etr_sg_table *etr_table;
+	struct device *dev = &drvdata->csdev->dev;

-	etr_table = tmc_init_etr_sg_table(drvdata->dev, node,
+	etr_table = tmc_init_etr_sg_table(dev, node,
 					  etr_buf->size, pages);
 	if (IS_ERR(etr_table))
 		return -ENOMEM;
...
 	if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
 		return NULL;

-	for (i = 0; i < etr->nr_outport; i++) {
-		tmp = etr->conns[i].child_dev;
+	for (i = 0; i < etr->pdata->nr_outport; i++) {
+		tmp = etr->pdata->conns[i].child_dev;
 		if (tmp && coresight_is_catu_device(tmp))
 			return tmp;
 	}

 	return NULL;
 }
+EXPORT_SYMBOL_GPL(tmc_etr_get_catu_device);
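Note: the switch from `etr->nr_outport`/`etr->conns` to `etr->pdata->nr_outport`/`etr->pdata->conns` follows the move of the connection bookkeeping into the device's `coresight_platform_data`; the ETR now walks its platform data to find a downstream CATU.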

 static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
 				      struct etr_buf *etr_buf)
...
 static const struct etr_buf_operations *etr_buf_ops[] = {
 	[ETR_MODE_FLAT] = &etr_flat_buf_ops,
 	[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
-	[ETR_MODE_CATU] = IS_ENABLED(CONFIG_CORESIGHT_CATU)
-			  ? &etr_catu_buf_ops : NULL,
+	[ETR_MODE_CATU] = NULL,
 };
+
+void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu)
+{
+	etr_buf_ops[ETR_MODE_CATU] = catu;
+}
+EXPORT_SYMBOL_GPL(tmc_etr_set_catu_ops);
+
+void tmc_etr_remove_catu_ops(void)
+{
+	etr_buf_ops[ETR_MODE_CATU] = NULL;
+}
+EXPORT_SYMBOL_GPL(tmc_etr_remove_catu_ops);
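Note: with the CATU backend now buildable as a module, its ops pointer can no longer be wired into `etr_buf_ops[]` at compile time; instead the CATU driver registers its `etr_buf_operations` at load time and removes them on unload. A hypothetical module init/exit pair showing the intended use of the new hooks, assuming the TMC/CATU header declarations (including `etr_catu_buf_ops`) are in scope:

```c
#include <linux/module.h>

static int __init catu_init(void)
{
	/* make ETR_MODE_CATU selectable now that the backend is loaded */
	tmc_etr_set_catu_ops(&etr_catu_buf_ops);
	return 0;
}

static void __exit catu_exit(void)
{
	/* ETR falls back to FLAT/SG allocation once CATU is gone */
	tmc_etr_remove_catu_ops();
}

module_init(catu_init);
module_exit(catu_exit);
```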

 static inline int tmc_etr_mode_alloc_buf(int mode,
 					 struct tmc_drvdata *drvdata,
...
 	bool has_etr_sg, has_iommu;
 	bool has_sg, has_catu;
 	struct etr_buf *etr_buf;
+	struct device *dev = &drvdata->csdev->dev;

 	has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
-	has_iommu = iommu_get_domain_for_dev(drvdata->dev);
+	has_iommu = iommu_get_domain_for_dev(dev->parent);
 	has_catu = !!tmc_etr_get_catu_device(drvdata);

 	has_sg = has_catu || has_etr_sg;
...
 	}

 	refcount_set(&etr_buf->refcount, 1);
-	dev_dbg(drvdata->dev, "allocated buffer of size %ldKB in mode %d\n",
+	dev_dbg(dev, "allocated buffer of size %ldKB in mode %d\n",
 		(unsigned long)size >> 10, etr_buf->mode);
 	return etr_buf;
 }
...

 	len = tmc_etr_buf_get_data(etr_buf, offset,
 				   CORESIGHT_BARRIER_PKT_SIZE, &bufp);
-	if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
+	if (WARN_ON(len < 0 || len < CORESIGHT_BARRIER_PKT_SIZE))
 		return -EINVAL;
 	coresight_insert_barrier_packet(bufp);
 	return offset + CORESIGHT_BARRIER_PKT_SIZE;
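Note: `len` is a signed `ssize_t`, while `CORESIGHT_BARRIER_PKT_SIZE` expands to an unsigned (sizeof-based) value, so in the old check a negative error return was converted to a huge unsigned number and sailed past the `WARN_ON()`. Checking `len < 0` first catches error codes before the unsigned comparison. A small userspace demo of the pitfall (hypothetical values):

```c
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	ssize_t len = -22;	/* e.g. an -EINVAL error return */
	size_t pkt_size = 16;	/* unsigned, like a sizeof()-based macro */

	/* len is converted to unsigned here, so the check never fires */
	printf("len < pkt_size: %d\n", len < pkt_size);		   /* 0 (!) */

	/* checking the sign first catches error codes as intended */
	printf("len < 0 || ...: %d\n", len < 0 || len < pkt_size); /* 1 */
	return 0;
}
```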
...
 	rc = tmc_etr_enable_catu(drvdata, etr_buf);
 	if (rc)
 		return rc;
-	rc = coresight_claim_device(drvdata->base);
+	rc = coresight_claim_device(drvdata->csdev);
 	if (!rc) {
 		drvdata->etr_buf = etr_buf;
 		__tmc_etr_enable_hw(drvdata);
...

 }

-static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
 {
 	__tmc_etr_disable_hw(drvdata);
 	/* Disable CATU device if this ETR is connected to one */
 	tmc_etr_disable_catu(drvdata);
-	coresight_disclaim_device(drvdata->base);
+	coresight_disclaim_device(drvdata->csdev);
 	/* Reset the ETR buf used by hardware */
 	drvdata->etr_buf = NULL;
 }
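Note: `coresight_claim_device()`/`coresight_disclaim_device()` now take the `coresight_device` itself rather than the raw MMIO base. `tmc_etr_disable_hw()` also drops `static`, presumably declared in the shared TMC header so the rest of the now-modular TMC code can reach it, in line with the `EXPORT_SYMBOL_GPL()` additions above.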
...
 		tmc_etr_free_sysfs_buf(free_buf);

 	if (!ret)
-		dev_dbg(drvdata->dev, "TMC-ETR enabled\n");
+		dev_dbg(&csdev->dev, "TMC-ETR enabled\n");

 	return ret;
 }
...
 	 * than the size requested via sysfs.
 	 */
 	if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
-		etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
+		etr_buf = tmc_alloc_etr_buf(drvdata, ((ssize_t)nr_pages << PAGE_SHIFT),
 					    0, node, NULL);
 		if (!IS_ERR(etr_buf))
 			goto done;
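Note: same widening fix as PERF_IDX2OFF above. `nr_pages` is a 32-bit value, so `nr_pages << PAGE_SHIFT` would be evaluated in 32-bit arithmetic and can overflow for large perf buffers; the `(ssize_t)` cast performs the shift at the width `tmc_alloc_etr_buf()` expects for its size argument.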
...
 	etr_perf = tmc_etr_setup_perf_buf(drvdata, event,
 					  nr_pages, pages, snapshot);
 	if (IS_ERR(etr_perf)) {
-		dev_dbg(drvdata->dev, "Unable to allocate ETR buffer\n");
+		dev_dbg(&csdev->dev, "Unable to allocate ETR buffer\n");
 		return NULL;
 	}

...

 	/* Insert barrier packets at the beginning, if there was an overflow */
 	if (lost)
-		tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
+		tmc_etr_buf_insert_barrier_packet(etr_buf, offset);
 	tmc_etr_sync_perf_buffer(etr_perf, offset, size);

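Note on the barrier-packet fix: in non-snapshot mode, when the ETR holds more data than the perf handle can take, the local `offset` computed earlier in this function has already been advanced past the oldest data so that only the most recent `size` bytes are copied out. Inserting barriers at the stale hardware `etr_buf->offset` would place them outside the window actually synced into the perf ring buffer, so the adjusted local `offset` must be used.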
 	/*
...
 	 */
 	if (etr_perf->snapshot)
 		handle->head += size;
+
+	/*
+	 * Ensure that the AUX trace data is visible before the aux_head
+	 * is updated via perf_aux_output_end(), as expected by the
+	 * perf ring buffer.
+	 */
+	smp_wmb();
+
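Note: the new `smp_wmb()` orders the trace-data writes (and the sync above) before `perf_aux_output_end()` publishes the new aux_head, so a consumer that observes the updated head is guaranteed to see the data it covers. As an analogy only (C11 atomics, not the kernel code), the same publish/consume ordering looks like this:

```c
#include <stdatomic.h>
#include <stddef.h>

/* Simplified ring: writer publishes head after filling data. */
static char buf[4096];
static _Atomic size_t aux_head;

void writer_publish(const char *data, size_t head, size_t len)
{
	for (size_t i = 0; i < len; i++)
		buf[(head + i) % sizeof(buf)] = data[i];
	/* release: data writes cannot move below the head update */
	atomic_store_explicit(&aux_head, head + len, memory_order_release);
}

size_t reader_snapshot(char *out, size_t tail)
{
	/* acquire pairs with the release: data reads stay below it */
	size_t head = atomic_load_explicit(&aux_head, memory_order_acquire);

	for (size_t i = tail; i < head; i++)
		out[i - tail] = buf[i % sizeof(buf)];
	return head;
}
```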
 out:
 	/*
 	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
...

 	spin_unlock_irqrestore(&drvdata->spinlock, flags);

-	dev_dbg(drvdata->dev, "TMC-ETR disabled\n");
+	dev_dbg(&csdev->dev, "TMC-ETR disabled\n");
 	return 0;
 }
---|