.. | .. |
---|
57 | 57 | }; |
---|
58 | 58 | |
---|
59 | 59 | struct dmtimer_clocksource { |
---|
60 | | - struct clocksource dev; |
---|
| 60 | + struct clocksource_user_mmio mmio; |
---|
61 | 61 | struct dmtimer_systimer t; |
---|
62 | 62 | unsigned int loadval; |
---|
63 | 63 | }; |
---|
.. | .. |
---|
437 | 437 | struct dmtimer_systimer *t = &clkevt->t; |
---|
438 | 438 | |
---|
439 | 439 | writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat); |
---|
440 | | - clkevt->dev.event_handler(&clkevt->dev); |
---|
| 440 | + clockevents_handle_event(&clkevt->dev); |
---|
441 | 441 | |
---|
442 | 442 | return IRQ_HANDLED; |
---|
443 | 443 | } |
---|
.. | .. |
---|
548 | 548 | * We mostly use cpuidle_coupled with ARM local timers for runtime, |
---|
549 | 549 | * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here. |
---|
550 | 550 | */ |
---|
551 | | - dev->features = features; |
---|
| 551 | + dev->features = features | CLOCK_EVT_FEAT_PIPELINE; |
---|
552 | 552 | dev->rating = rating; |
---|
553 | 553 | dev->set_next_event = dmtimer_set_next_event; |
---|
554 | 554 | dev->set_state_shutdown = dmtimer_clockevent_shutdown; |
---|
.. | .. |
---|
706 | 706 | static struct dmtimer_clocksource * |
---|
707 | 707 | to_dmtimer_clocksource(struct clocksource *cs) |
---|
708 | 708 | { |
---|
709 | | - return container_of(cs, struct dmtimer_clocksource, dev); |
---|
710 | | -} |
---|
711 | | - |
---|
712 | | -static u64 dmtimer_clocksource_read_cycles(struct clocksource *cs) |
---|
713 | | -{ |
---|
714 | | - struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs); |
---|
715 | | - struct dmtimer_systimer *t = &clksrc->t; |
---|
716 | | - |
---|
717 | | - return (u64)readl_relaxed(t->base + t->counter); |
---|
| 709 | + return container_of(cs, struct dmtimer_clocksource, mmio.mmio.clksrc); |
---|
718 | 710 | } |
---|
719 | 711 | |
---|
720 | 712 | static void __iomem *dmtimer_sched_clock_counter; |
---|
.. | .. |
---|
753 | 745 | static int __init dmtimer_clocksource_init(struct device_node *np) |
---|
754 | 746 | { |
---|
755 | 747 | struct dmtimer_clocksource *clksrc; |
---|
| 748 | + struct clocksource_mmio_regs mmr; |
---|
756 | 749 | struct dmtimer_systimer *t; |
---|
757 | 750 | struct clocksource *dev; |
---|
758 | 751 | int error; |
---|
.. | .. |
---|
761 | 754 | if (!clksrc) |
---|
762 | 755 | return -ENOMEM; |
---|
763 | 756 | |
---|
764 | | - dev = &clksrc->dev; |
---|
| 757 | + dev = &clksrc->mmio.mmio.clksrc; |
---|
765 | 758 | t = &clksrc->t; |
---|
766 | 759 | |
---|
767 | 760 | error = dmtimer_systimer_setup(np, t); |
---|
.. | .. |
---|
770 | 763 | |
---|
771 | 764 | dev->name = "dmtimer"; |
---|
772 | 765 | dev->rating = 300; |
---|
773 | | - dev->read = dmtimer_clocksource_read_cycles; |
---|
| 766 | + dev->read = clocksource_mmio_readl_up; |
---|
774 | 767 | dev->mask = CLOCKSOURCE_MASK(32); |
---|
775 | 768 | dev->flags = CLOCK_SOURCE_IS_CONTINUOUS; |
---|
776 | 769 | |
---|
.. | .. |
---|
793 | 786 | sched_clock_register(dmtimer_read_sched_clock, 32, t->rate); |
---|
794 | 787 | } |
---|
795 | 788 | |
---|
796 | | - if (clocksource_register_hz(dev, t->rate)) |
---|
| 789 | + mmr.reg_lower = t->base + t->counter; |
---|
| 790 | + mmr.bits_lower = 32; |
---|
| 791 | + mmr.reg_upper = NULL; |
---|
| 792 | + mmr.bits_upper = 0; |
---|
| 793 | + mmr.revmap = NULL; |
---|
| 794 | + |
---|
| 795 | + if (clocksource_user_mmio_init(&clksrc->mmio, &mmr, t->rate)) |
---|
797 | 796 | pr_err("Could not register clocksource %pOF\n", np); |
---|
798 | 797 | |
---|
799 | 798 | return 0; |
---|