```diff
@@ -162,10 +162,11 @@
 			   struct device *dev, enum dma_data_direction dir)
 {
 	int i;
+	struct device *real_dev = dev->parent;
 
 	for (i = 0; i < tmc_pages->nr_pages; i++) {
 		if (tmc_pages->daddrs && tmc_pages->daddrs[i])
-			dma_unmap_page(dev, tmc_pages->daddrs[i],
+			dma_unmap_page(real_dev, tmc_pages->daddrs[i],
 				       PAGE_SIZE, dir);
 		if (tmc_pages->pages && tmc_pages->pages[i])
 			__free_page(tmc_pages->pages[i]);
```
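Throughout this patch the DMA calls move from the coresight device to its parent. The coresight device is a virtual child with no DMA configuration of its own; the parent is the physical (AMBA) device that carries the dma_ops and DMA mask, so it is the one the DMA API must see. A minimal sketch of the pattern, with a hypothetical helper name:

```c
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>

/*
 * tmc_map_one_page() is an illustrative helper, not part of the patch:
 * @dev is the coresight (child) device passed around in this file, and
 * DMA is performed against its parent, the physical AMBA device.
 */
static dma_addr_t tmc_map_one_page(struct device *dev, struct page *page,
				   enum dma_data_direction dir)
{
	struct device *real_dev = dev->parent;

	return dma_map_page(real_dev, page, 0, PAGE_SIZE, dir);
}
```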
```diff
@@ -193,6 +194,7 @@
 	int i, nr_pages;
 	dma_addr_t paddr;
 	struct page *page;
+	struct device *real_dev = dev->parent;
 
 	nr_pages = tmc_pages->nr_pages;
 	tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
```
```diff
@@ -218,8 +220,8 @@
 			if (!page)
 				goto err;
 		}
-		paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
-		if (dma_mapping_error(dev, paddr))
+		paddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, dir);
+		if (dma_mapping_error(real_dev, paddr))
 			goto err;
 		tmc_pages->daddrs[i] = paddr;
 		tmc_pages->pages[i] = page;
```
```diff
@@ -255,6 +257,7 @@
 	tmc_free_table_pages(sg_table);
 	tmc_free_data_pages(sg_table);
 }
+EXPORT_SYMBOL_GPL(tmc_free_sg_table);
 
 /*
  * Alloc pages for the table. Since this will be used by the device,
```
```diff
@@ -306,7 +309,7 @@
  * and data buffers. TMC writes to the data buffers and reads from the SG
  * Table pages.
  *
- * @dev		- Device to which page should be DMA mapped.
+ * @dev		- Coresight device to which page should be DMA mapped.
  * @node	- Numa node for mem allocations
  * @nr_tpages	- Number of pages for the table entries.
  * @nr_dpages	- Number of pages for Data buffer.
```
```diff
@@ -340,6 +343,7 @@
 
 	return sg_table;
 }
+EXPORT_SYMBOL_GPL(tmc_alloc_sg_table);
 
 /*
  * tmc_sg_table_sync_data_range: Sync the data buffer written
```
```diff
@@ -350,28 +354,30 @@
 {
 	int i, index, start;
 	int npages = DIV_ROUND_UP(size, PAGE_SIZE);
-	struct device *dev = table->dev;
+	struct device *real_dev = table->dev->parent;
 	struct tmc_pages *data = &table->data_pages;
 
 	start = offset >> PAGE_SHIFT;
 	for (i = start; i < (start + npages); i++) {
 		index = i % data->nr_pages;
-		dma_sync_single_for_cpu(dev, data->daddrs[index],
+		dma_sync_single_for_cpu(real_dev, data->daddrs[index],
 					PAGE_SIZE, DMA_FROM_DEVICE);
 	}
 }
+EXPORT_SYMBOL_GPL(tmc_sg_table_sync_data_range);
 
 /* tmc_sg_sync_table: Sync the page table */
 void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
 {
 	int i;
-	struct device *dev = sg_table->dev;
+	struct device *real_dev = sg_table->dev->parent;
 	struct tmc_pages *table_pages = &sg_table->table_pages;
 
 	for (i = 0; i < table_pages->nr_pages; i++)
-		dma_sync_single_for_device(dev, table_pages->daddrs[i],
+		dma_sync_single_for_device(real_dev, table_pages->daddrs[i],
 					   PAGE_SIZE, DMA_TO_DEVICE);
 }
+EXPORT_SYMBOL_GPL(tmc_sg_table_sync_table);
 
 /*
  * tmc_sg_table_get_data: Get the buffer pointer for data @offset
```
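Both sync helpers follow the standard streaming-DMA ownership rule: the data pages are written by the TMC, so the CPU syncs them with DMA_FROM_DEVICE before reading, while the SG-table pages are written by the CPU, so they are synced toward the device with DMA_TO_DEVICE before the TMC walks them. A condensed restatement of that rule; the function and parameter names here are illustrative, not from the patch:

```c
#include <linux/dma-mapping.h>

static void tmc_sync_example(struct device *real_dev,
			     dma_addr_t data_daddr, dma_addr_t table_daddr)
{
	/* CPU is about to read trace data the TMC wrote: */
	dma_sync_single_for_cpu(real_dev, data_daddr, PAGE_SIZE,
				DMA_FROM_DEVICE);

	/* TMC is about to read SG-table entries the CPU wrote: */
	dma_sync_single_for_device(real_dev, table_daddr, PAGE_SIZE,
				   DMA_TO_DEVICE);
}
```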
```diff
@@ -401,6 +407,7 @@
 	*bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
 	return len;
 }
+EXPORT_SYMBOL_GPL(tmc_sg_table_get_data);
 
 #ifdef ETR_SG_DEBUG
 /* Map a dma address to virtual address */
```
```diff
@@ -592,6 +599,7 @@
 			       void **pages)
 {
 	struct etr_flat_buf *flat_buf;
+	struct device *real_dev = drvdata->csdev->dev.parent;
 
 	/* We cannot reuse existing pages for flat buf */
 	if (pages)
```
```diff
@@ -601,15 +609,16 @@
 	if (!flat_buf)
 		return -ENOMEM;
 
-	flat_buf->vaddr = dma_alloc_coherent(drvdata->dev, etr_buf->size,
-					     &flat_buf->daddr, GFP_KERNEL);
+	flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
+						&flat_buf->daddr,
+						DMA_FROM_DEVICE, GFP_KERNEL);
 	if (!flat_buf->vaddr) {
 		kfree(flat_buf);
 		return -ENOMEM;
 	}
 
 	flat_buf->size = etr_buf->size;
-	flat_buf->dev = drvdata->dev;
+	flat_buf->dev = &drvdata->csdev->dev;
 	etr_buf->hwaddr = flat_buf->daddr;
 	etr_buf->mode = ETR_MODE_FLAT;
 	etr_buf->private = flat_buf;
```
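Replacing dma_alloc_coherent() with dma_alloc_noncoherent() gives the flat buffer ordinary cacheable memory at the cost of explicit synchronisation: the allocation now behaves like a streaming mapping, so the driver must sync before every CPU read, which the reworked tmc_etr_sync_flat_buf() below takes care of. A sketch of the resulting lifecycle, assuming a device-write-only buffer (DMA_FROM_DEVICE):

```c
#include <linux/dma-mapping.h>

/* Illustrative lifecycle of a noncoherent DMA_FROM_DEVICE buffer. */
static int flat_buf_lifecycle(struct device *real_dev, size_t size)
{
	dma_addr_t daddr;
	void *vaddr;

	vaddr = dma_alloc_noncoherent(real_dev, size, &daddr,
				      DMA_FROM_DEVICE, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* ... program the device with daddr and let it write ... */

	/* Hand ownership back to the CPU before reading the data: */
	dma_sync_single_for_cpu(real_dev, daddr, size, DMA_FROM_DEVICE);

	/* Free with the same size and direction used at allocation: */
	dma_free_noncoherent(real_dev, size, vaddr, daddr, DMA_FROM_DEVICE);
	return 0;
}
```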
```diff
@@ -620,14 +629,21 @@
 {
 	struct etr_flat_buf *flat_buf = etr_buf->private;
 
-	if (flat_buf && flat_buf->daddr)
-		dma_free_coherent(flat_buf->dev, flat_buf->size,
-				  flat_buf->vaddr, flat_buf->daddr);
+	if (flat_buf && flat_buf->daddr) {
+		struct device *real_dev = flat_buf->dev->parent;
+
+		dma_free_noncoherent(real_dev, etr_buf->size,
+				     flat_buf->vaddr, flat_buf->daddr,
+				     DMA_FROM_DEVICE);
+	}
 	kfree(flat_buf);
 }
 
 static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
 {
+	struct etr_flat_buf *flat_buf = etr_buf->private;
+	struct device *real_dev = flat_buf->dev->parent;
+
 	/*
 	 * Adjust the buffer to point to the beginning of the trace data
 	 * and update the available trace data.
```
```diff
@@ -637,6 +653,19 @@
 		etr_buf->len = etr_buf->size;
 	else
 		etr_buf->len = rwp - rrp;
+
+	/*
+	 * The driver always starts tracing at the beginning of the buffer,
+	 * the only reason why we would get a wrap around is when the buffer
+	 * is full. Sync the entire buffer in one go for this case.
+	 */
+	if (etr_buf->offset + etr_buf->len > etr_buf->size)
+		dma_sync_single_for_cpu(real_dev, flat_buf->daddr,
+					etr_buf->size, DMA_FROM_DEVICE);
+	else
+		dma_sync_single_for_cpu(real_dev,
+					flat_buf->daddr + etr_buf->offset,
+					etr_buf->len, DMA_FROM_DEVICE);
 }
 
 static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
```
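The wrap-around branch deserves a worked example. Tracing always starts at offset 0, so a wrap can only mean the buffer filled completely, and the split dirty region is cheapest to handle with a single full-buffer sync. With hypothetical numbers:

```c
/*
 * size = 8192, offset = 6144, len = 4096 (all hypothetical).
 * offset + len = 10240 > size, so the valid data spans [6144, 8192)
 * plus the wrapped tail [0, 2048). Instead of two partial syncs, one
 * call covers everything:
 *
 *	dma_sync_single_for_cpu(real_dev, flat_buf->daddr,
 *				etr_buf->size, DMA_FROM_DEVICE);
 */
```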
```diff
@@ -668,8 +697,9 @@
 				 void **pages)
 {
 	struct etr_sg_table *etr_table;
+	struct device *dev = &drvdata->csdev->dev;
 
-	etr_table = tmc_init_etr_sg_table(drvdata->dev, node,
+	etr_table = tmc_init_etr_sg_table(dev, node,
 					  etr_buf->size, pages);
 	if (IS_ERR(etr_table))
 		return -ENOMEM;
```
```diff
@@ -753,14 +783,15 @@
 	if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
 		return NULL;
 
-	for (i = 0; i < etr->nr_outport; i++) {
-		tmp = etr->conns[i].child_dev;
+	for (i = 0; i < etr->pdata->nr_outport; i++) {
+		tmp = etr->pdata->conns[i].child_dev;
 		if (tmp && coresight_is_catu_device(tmp))
 			return tmp;
 	}
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(tmc_etr_get_catu_device);
 
 static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
 				      struct etr_buf *etr_buf)
```
```diff
@@ -783,9 +814,20 @@
 static const struct etr_buf_operations *etr_buf_ops[] = {
 	[ETR_MODE_FLAT] = &etr_flat_buf_ops,
 	[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
-	[ETR_MODE_CATU] = IS_ENABLED(CONFIG_CORESIGHT_CATU)
-						? &etr_catu_buf_ops : NULL,
+	[ETR_MODE_CATU] = NULL,
 };
+
+void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu)
+{
+	etr_buf_ops[ETR_MODE_CATU] = catu;
+}
+EXPORT_SYMBOL_GPL(tmc_etr_set_catu_ops);
+
+void tmc_etr_remove_catu_ops(void)
+{
+	etr_buf_ops[ETR_MODE_CATU] = NULL;
+}
+EXPORT_SYMBOL_GPL(tmc_etr_remove_catu_ops);
 
 static inline int tmc_etr_mode_alloc_buf(int mode,
 					 struct tmc_drvdata *drvdata,
```
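Dropping the IS_ENABLED() initialiser removes the ETR core's static reference to the CATU ops, so CATU can live in a separate module that plugs its etr_buf_operations in at load time and unplugs them on removal. A sketch of the expected module-side usage; catu_driver and etr_catu_buf_ops are assumed names from the CATU driver, which this hunk does not show:

```c
static int __init catu_init(void)
{
	int ret;

	ret = amba_driver_register(&catu_driver);
	if (!ret)
		tmc_etr_set_catu_ops(&etr_catu_buf_ops);
	return ret;
}

static void __exit catu_exit(void)
{
	tmc_etr_remove_catu_ops();
	amba_driver_unregister(&catu_driver);
}

module_init(catu_init);
module_exit(catu_exit);
```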
```diff
@@ -825,9 +867,10 @@
 	bool has_etr_sg, has_iommu;
 	bool has_sg, has_catu;
 	struct etr_buf *etr_buf;
+	struct device *dev = &drvdata->csdev->dev;
 
 	has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
-	has_iommu = iommu_get_domain_for_dev(drvdata->dev);
+	has_iommu = iommu_get_domain_for_dev(dev->parent);
 	has_catu = !!tmc_etr_get_catu_device(drvdata);
 
 	has_sg = has_catu || has_etr_sg;
```
```diff
@@ -866,7 +909,7 @@
 	}
 
 	refcount_set(&etr_buf->refcount, 1);
-	dev_dbg(drvdata->dev, "allocated buffer of size %ldKB in mode %d\n",
+	dev_dbg(dev, "allocated buffer of size %ldKB in mode %d\n",
 		(unsigned long)size >> 10, etr_buf->mode);
 	return etr_buf;
 }
```
```diff
@@ -1015,7 +1058,7 @@
 	rc = tmc_etr_enable_catu(drvdata, etr_buf);
 	if (rc)
 		return rc;
-	rc = coresight_claim_device(drvdata->base);
+	rc = coresight_claim_device(drvdata->csdev);
 	if (!rc) {
 		drvdata->etr_buf = etr_buf;
 		__tmc_etr_enable_hw(drvdata);
```
```diff
@@ -1104,12 +1147,12 @@
 
 }
 
-static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
 {
 	__tmc_etr_disable_hw(drvdata);
 	/* Disable CATU device if this ETR is connected to one */
 	tmc_etr_disable_catu(drvdata);
-	coresight_disclaim_device(drvdata->base);
+	coresight_disclaim_device(drvdata->csdev);
 	/* Reset the ETR buf used by hardware */
 	drvdata->etr_buf = NULL;
 }
```
```diff
@@ -1181,7 +1224,7 @@
 		tmc_etr_free_sysfs_buf(free_buf);
 
 	if (!ret)
-		dev_dbg(drvdata->dev, "TMC-ETR enabled\n");
+		dev_dbg(&csdev->dev, "TMC-ETR enabled\n");
 
 	return ret;
 }
```
```diff
@@ -1362,7 +1405,7 @@
 	etr_perf = tmc_etr_setup_perf_buf(drvdata, event,
 					  nr_pages, pages, snapshot);
 	if (IS_ERR(etr_perf)) {
-		dev_dbg(drvdata->dev, "Unable to allocate ETR buffer\n");
+		dev_dbg(&csdev->dev, "Unable to allocate ETR buffer\n");
 		return NULL;
 	}
 
```
```diff
@@ -1527,7 +1570,7 @@
 
 	/* Insert barrier packets at the beginning, if there was an overflow */
 	if (lost)
-		tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
+		tmc_etr_buf_insert_barrier_packet(etr_buf, offset);
 	tmc_etr_sync_perf_buffer(etr_perf, offset, size);
 
 	/*
```
```diff
@@ -1538,6 +1581,14 @@
 	 */
 	if (etr_perf->snapshot)
 		handle->head += size;
+
+	/*
+	 * Ensure that the AUX trace data is visible before the aux_head
+	 * is updated via perf_aux_output_end(), as expected by the
+	 * perf ring buffer.
+	 */
+	smp_wmb();
+
 out:
 	/*
 	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
```
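The added smp_wmb() publishes the trace bytes before perf_aux_output_end() advances aux_head; it pairs with the read barrier the ring-buffer consumer issues between loading aux_head and reading the data. Schematically (illustrative, not from the patch):

```c
/*
 * Writer (this driver)             Reader (perf core / userspace)
 * --------------------             ------------------------------
 * write trace bytes                head = READ_ONCE(aux_head);
 * smp_wmb();                       smp_rmb();
 * publish new aux_head             read trace bytes up to head
 */
```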
```diff
@@ -1647,7 +1698,7 @@
 
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-	dev_dbg(drvdata->dev, "TMC-ETR disabled\n");
+	dev_dbg(&csdev->dev, "TMC-ETR disabled\n");
 	return 0;
 }
 
```