@@ -89,7 +89,8 @@
 
 static u64 read_internal_timer(const struct cyclecounter *cc)
 {
-        struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
+        struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
+        struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
         struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
                                                   clock);
 
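Since cycles now lives in struct mlx5_timer, which is itself embedded in struct mlx5_clock, recovering the device context takes two container_of() steps instead of one. A stripped-down userspace sketch of the pattern (the struct layouts here are illustrative, not the real mlx5 definitions):

#include <stddef.h>

/* Userspace equivalent of the kernel's container_of(): recover the
 * enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct cyclecounter { unsigned long long mask; };     /* illustrative */
struct mlx5_timer  { struct cyclecounter cycles; };   /* illustrative */
struct mlx5_clock  { struct mlx5_timer timer; };      /* illustrative */

static struct mlx5_clock *clock_from_cyclecounter(struct cyclecounter *cc)
{
        /* Hop 1: cycles -> timer; hop 2: timer -> clock. */
        struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);

        return container_of(timer, struct mlx5_clock, timer);
}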
@@ -100,6 +101,7 @@
 {
         struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
         struct mlx5_clock *clock = &mdev->clock;
+        struct mlx5_timer *timer;
         u32 sign;
 
         if (!clock_info)
@@ -109,10 +111,11 @@
         smp_store_mb(clock_info->sign,
                      sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
 
-        clock_info->cycles = clock->tc.cycle_last;
-        clock_info->mult = clock->cycles.mult;
-        clock_info->nsec = clock->tc.nsec;
-        clock_info->frac = clock->tc.frac;
+        timer = &clock->timer;
+        clock_info->cycles = timer->tc.cycle_last;
+        clock_info->mult = timer->cycles.mult;
+        clock_info->nsec = timer->tc.nsec;
+        clock_info->frac = timer->tc.frac;
 
         smp_store_release(&clock_info->sign,
                           sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
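The sign word implements a seqcount-style protocol over the page shared with userspace: smp_store_mb() publishes the "kernel is updating" state before any field is touched, and smp_store_release() publishes the advanced sequence once the snapshot is consistent. A hypothetical reader-side loop (the real consumer lives in userspace RDMA libraries; this helper is illustrative only):

/* Illustrative reader for the shared clock_info page: retry while the
 * writer holds MLX5_IB_CLOCK_INFO_KERNEL_UPDATING or the sequence
 * changed underneath us. */
static u64 read_clock_info_cycles(struct mlx5_ib_clock_info *info)
{
        u64 cycles;
        u32 sign;

        do {
                sign = smp_load_acquire(&info->sign);   /* snapshot sequence  */
                cycles = READ_ONCE(info->cycles);       /* read the fields    */
                smp_rmb();                              /* order the re-check */
        } while ((sign & MLX5_IB_CLOCK_INFO_KERNEL_UPDATING) ||
                 READ_ONCE(info->sign) != sign);        /* writer raced: retry */

        return cycles;
}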
@@ -151,28 +154,37 @@
 {
         struct delayed_work *dwork = to_delayed_work(work);
         struct mlx5_core_dev *mdev;
+        struct mlx5_timer *timer;
         struct mlx5_clock *clock;
         unsigned long flags;
 
-        clock = container_of(dwork, struct mlx5_clock, overflow_work);
+        timer = container_of(dwork, struct mlx5_timer, overflow_work);
+        clock = container_of(timer, struct mlx5_clock, timer);
         mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+                goto out;
+
         write_seqlock_irqsave(&clock->lock, flags);
-        timecounter_read(&clock->tc);
+        timecounter_read(&timer->tc);
         mlx5_update_clock_info_page(mdev);
         write_sequnlock_irqrestore(&clock->lock, flags);
-        schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
+
+out:
+        schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
 }
 
 static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
 {
         struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+        struct mlx5_timer *timer = &clock->timer;
         u64 ns = timespec64_to_ns(ts);
         struct mlx5_core_dev *mdev;
         unsigned long flags;
 
         mdev = container_of(clock, struct mlx5_core_dev, clock);
         write_seqlock_irqsave(&clock->lock, flags);
-        timecounter_init(&clock->tc, &clock->cycles, ns);
+        timecounter_init(&timer->tc, &timer->cycles, ns);
         mlx5_update_clock_info_page(mdev);
         write_sequnlock_irqrestore(&clock->lock, flags);
 
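Two behavioral notes on mlx5_timestamp_overflow(): the worker exists because the free-running counter is only 41 bits wide, so timecounter_read() must run at least twice per wraparound to keep the accumulated delta inside cycles.mask; and the new internal-error check skips the hardware read but still falls through to schedule_delayed_work(), so the watchdog keeps ticking across device recovery. A minimal sketch of that self-rescheduling pattern (struct periodic_ctx is a stand-in for struct mlx5_timer):

#include <linux/workqueue.h>

/* Stand-in for the driver state that embeds the delayed work. */
struct periodic_ctx {
        struct delayed_work work;
        unsigned long period;   /* re-arm interval, in jiffies */
};

static void periodic_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct periodic_ctx *ctx =
                container_of(dwork, struct periodic_ctx, work);

        /* ... periodic body here; it may bail out early on error ... */

        /* Unconditionally re-arm so the cadence survives error paths. */
        schedule_delayed_work(&ctx->work, ctx->period);
}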
@@ -183,6 +195,7 @@
                              struct ptp_system_timestamp *sts)
 {
         struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+        struct mlx5_timer *timer = &clock->timer;
         struct mlx5_core_dev *mdev;
         unsigned long flags;
         u64 cycles, ns;
@@ -190,7 +203,7 @@
         mdev = container_of(clock, struct mlx5_core_dev, clock);
         write_seqlock_irqsave(&clock->lock, flags);
         cycles = mlx5_read_internal_timer(mdev, sts);
-        ns = timecounter_cyc2time(&clock->tc, cycles);
+        ns = timecounter_cyc2time(&timer->tc, cycles);
         write_sequnlock_irqrestore(&clock->lock, flags);
 
         *ts = ns_to_timespec64(ns);
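timecounter_cyc2time() expands a raw counter sample to nanoseconds as, roughly, tc.nsec + (((cycles - tc.cycle_last) & mask) * mult >> shift); the kernel version also tracks a fractional remainder in tc.frac. A simplified sketch of the conversion (assumes no wraparound correction and ignores the fractional carry):

#include <linux/types.h>

/* Simplified cycles -> ns expansion; the real timecounter_cyc2time()
 * additionally carries the sub-ns remainder across calls. */
static u64 cycles_to_ns(u64 nsec_base, u64 cycle_last, u64 mask,
                        u32 mult, u32 shift, u64 cycles)
{
        u64 delta = (cycles - cycle_last) & mask;       /* elapsed ticks */

        return nsec_base + ((delta * mult) >> shift);   /* scale to ns  */
}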
@@ -201,12 +214,13 @@
 static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
         struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+        struct mlx5_timer *timer = &clock->timer;
         struct mlx5_core_dev *mdev;
         unsigned long flags;
 
         mdev = container_of(clock, struct mlx5_core_dev, clock);
         write_seqlock_irqsave(&clock->lock, flags);
-        timecounter_adjtime(&clock->tc, delta);
+        timecounter_adjtime(&timer->tc, delta);
         mlx5_update_clock_info_page(mdev);
         write_sequnlock_irqrestore(&clock->lock, flags);
 
@@ -216,27 +230,27 @@
 static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 {
         struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+        struct mlx5_timer *timer = &clock->timer;
         struct mlx5_core_dev *mdev;
         unsigned long flags;
         int neg_adj = 0;
         u32 diff;
         u64 adj;
 
-
         if (delta < 0) {
                 neg_adj = 1;
                 delta = -delta;
         }
 
-        adj = clock->nominal_c_mult;
+        adj = timer->nominal_c_mult;
         adj *= delta;
         diff = div_u64(adj, 1000000000ULL);
 
         mdev = container_of(clock, struct mlx5_core_dev, clock);
         write_seqlock_irqsave(&clock->lock, flags);
-        timecounter_read(&clock->tc);
-        clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
-                                       clock->nominal_c_mult + diff;
+        timecounter_read(&timer->tc);
+        timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff :
+                                       timer->nominal_c_mult + diff;
         mlx5_update_clock_info_page(mdev);
         write_sequnlock_irqrestore(&clock->lock, flags);
 
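In .adjfreq, delta is a frequency offset in parts per billion, so the multiplier correction is diff = nominal_c_mult * |delta| / 10^9, always applied against the nominal multiplier rather than cumulatively against the current one. A standalone rework with a worked value (the multiplier is an example, not a real mlx5 calibration):

#include <stdint.h>
#include <stdio.h>

/* Scale a nominal clock multiplier by a ppb adjustment, mirroring the
 * adjfreq math above. Values are illustrative. */
static uint32_t adjust_mult(uint32_t nominal_mult, int32_t ppb)
{
        int neg = ppb < 0;
        uint64_t adj = (uint64_t)nominal_mult * (uint32_t)(neg ? -ppb : ppb);
        uint32_t diff = (uint32_t)(adj / 1000000000ULL); /* ppb -> mult delta */

        return neg ? nominal_mult - diff : nominal_mult + diff;
}

int main(void)
{
        /* Slow an example 2^24 multiplier by 100 ppb:
         * diff = 16777216 * 100 / 1e9 = 1, so 16777216 -> 16777215. */
        printf("%u\n", adjust_mult(16777216, -100));
        return 0;
}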
@@ -313,6 +327,7 @@
                 container_of(ptp, struct mlx5_clock, ptp_info);
         struct mlx5_core_dev *mdev =
                 container_of(clock, struct mlx5_core_dev, clock);
+        struct mlx5_timer *timer = &clock->timer;
         u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
         u64 nsec_now, nsec_delta, time_stamp = 0;
         u64 cycles_now, cycles_delta;
@@ -355,10 +370,10 @@
         ns = timespec64_to_ns(&ts);
         cycles_now = mlx5_read_internal_timer(mdev, NULL);
         write_seqlock_irqsave(&clock->lock, flags);
-        nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
+        nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
         nsec_delta = ns - nsec_now;
-        cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
-                                 clock->cycles.mult);
+        cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
+                                 timer->cycles.mult);
         write_sequnlock_irqrestore(&clock->lock, flags);
         time_stamp = cycles_now + cycles_delta;
         field_select = MLX5_MTPPS_FS_PIN_MODE |
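Programming a hardware event at an absolute wall-clock time needs the inverse conversion: the nanosecond distance from "now" is turned back into raw ticks with cycles = (nsec_delta << shift) / mult, then added to the current counter reading. A standalone sketch (the caller is assumed to keep nsec_delta << shift within 64 bits, as the driver's bounded deltas do):

#include <stdint.h>

/* ns -> cycles, inverting (cycles * mult) >> shift. */
static uint64_t ns_to_cycles(uint64_t ns, uint32_t mult, uint32_t shift)
{
        return (ns << shift) / mult;
}

/* Absolute counter value for an event nsec_delta ns in the future. */
static uint64_t event_timestamp(uint64_t cycles_now, uint64_t nsec_delta,
                                uint32_t mult, uint32_t shift)
{
        return cycles_now + ns_to_cycles(nsec_delta, mult, shift);
}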
@@ -440,7 +455,7 @@
 static const struct ptp_clock_info mlx5_ptp_clock_info = {
         .owner = THIS_MODULE,
         .name = "mlx5_ptp",
-        .max_adj = 100000000,
+        .max_adj = 50000000,
         .n_alarm = 0,
         .n_ext_ts = 0,
         .n_per_out = 0,
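Halving max_adj from 100000000 to 50000000 caps the frequency slew userspace may request at ±5% of nominal instead of ±10%. One consistent reading, though the patch itself does not say so, is overflow headroom: overflow_cycles is sized against the nominal multiplier, so a runtime multiplier inflated by max_adj ppb eats into the guarantee that cycles * mult stays within 64 bits. A quick check of how far the multiplier can now drift (illustrative numbers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t nominal_mult = 16777216;       /* example value (2^24)  */
        int32_t max_adj = 50000000;             /* new cap: 5e7 ppb = 5% */
        uint64_t diff = (uint64_t)nominal_mult * (uint32_t)max_adj
                        / 1000000000ULL;

        /* Worst-case runtime multiplier range under the new cap. */
        printf("mult range: %u .. %u\n",
               nominal_mult - (uint32_t)diff, nominal_mult + (uint32_t)diff);
        return 0;
}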
@@ -541,6 +556,7 @@
                           unsigned long type, void *data)
 {
         struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
+        struct mlx5_timer *timer = &clock->timer;
         struct ptp_clock_event ptp_event;
         u64 cycles_now, cycles_delta;
         u64 nsec_now, nsec_delta, ns;
@@ -575,10 +591,10 @@
                 ts.tv_nsec = 0;
                 ns = timespec64_to_ns(&ts);
                 write_seqlock_irqsave(&clock->lock, flags);
-                nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
+                nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
                 nsec_delta = ns - nsec_now;
-                cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
-                                         clock->cycles.mult);
+                cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
+                                         timer->cycles.mult);
                 clock->pps_info.start[pin] = cycles_now + cycles_delta;
                 write_sequnlock_irqrestore(&clock->lock, flags);
                 schedule_work(&clock->pps_info.out_work);
@@ -591,29 +607,32 @@
         return NOTIFY_OK;
 }
 
-void mlx5_init_clock(struct mlx5_core_dev *mdev)
+static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
 {
         struct mlx5_clock *clock = &mdev->clock;
-        u64 overflow_cycles;
-        u64 ns;
-        u64 frac = 0;
+        struct mlx5_timer *timer = &clock->timer;
         u32 dev_freq;
 
         dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
-        if (!dev_freq) {
-                mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
-                return;
-        }
-        seqlock_init(&clock->lock);
-        clock->cycles.read = read_internal_timer;
-        clock->cycles.shift = MLX5_CYCLES_SHIFT;
-        clock->cycles.mult = clocksource_khz2mult(dev_freq,
-                                                  clock->cycles.shift);
-        clock->nominal_c_mult = clock->cycles.mult;
-        clock->cycles.mask = CLOCKSOURCE_MASK(41);
+        timer->cycles.read = read_internal_timer;
+        timer->cycles.shift = MLX5_CYCLES_SHIFT;
+        timer->cycles.mult = clocksource_khz2mult(dev_freq,
+                                                  timer->cycles.shift);
+        timer->nominal_c_mult = timer->cycles.mult;
+        timer->cycles.mask = CLOCKSOURCE_MASK(41);
 
-        timecounter_init(&clock->tc, &clock->cycles,
+        timecounter_init(&timer->tc, &timer->cycles,
                          ktime_to_ns(ktime_get_real()));
+}
+
+static void mlx5_init_overflow_period(struct mlx5_clock *clock)
+{
+        struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
+        struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
+        struct mlx5_timer *timer = &clock->timer;
+        u64 overflow_cycles;
+        u64 frac = 0;
+        u64 ns;
 
         /* Calculate period in seconds to call the overflow watchdog - to make
          * sure counter is checked at least twice every wrap around.
@@ -622,32 +641,63 @@
          * multiplied by clock multiplier where the result doesn't exceed
          * 64bits.
          */
-        overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
-        overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
+        overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
+        overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));
 
-        ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
+        ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
                                  frac, &frac);
         do_div(ns, NSEC_PER_SEC / HZ);
-        clock->overflow_period = ns;
+        timer->overflow_period = ns;
 
-        mdev->clock_info =
-                (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
-        if (mdev->clock_info) {
-                mdev->clock_info->nsec = clock->tc.nsec;
-                mdev->clock_info->cycles = clock->tc.cycle_last;
-                mdev->clock_info->mask = clock->cycles.mask;
-                mdev->clock_info->mult = clock->nominal_c_mult;
-                mdev->clock_info->shift = clock->cycles.shift;
-                mdev->clock_info->frac = clock->tc.frac;
-                mdev->clock_info->overflow_period = clock->overflow_period;
+        INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
+        if (timer->overflow_period)
+                schedule_delayed_work(&timer->overflow_work, 0);
+        else
+                mlx5_core_warn(mdev,
+                               "invalid overflow period, overflow_work is not scheduled\n");
+
+        if (clock_info)
+                clock_info->overflow_period = timer->overflow_period;
+}
+
+static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
+{
+        struct mlx5_clock *clock = &mdev->clock;
+        struct mlx5_ib_clock_info *info;
+        struct mlx5_timer *timer;
+
+        mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
+        if (!mdev->clock_info) {
+                mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
+                return;
         }
 
+        info = mdev->clock_info;
+        timer = &clock->timer;
+
+        info->nsec = timer->tc.nsec;
+        info->cycles = timer->tc.cycle_last;
+        info->mask = timer->cycles.mask;
+        info->mult = timer->nominal_c_mult;
+        info->shift = timer->cycles.shift;
+        info->frac = timer->tc.frac;
+}
+
+void mlx5_init_clock(struct mlx5_core_dev *mdev)
+{
+        struct mlx5_clock *clock = &mdev->clock;
+
+        if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
+                mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
+                return;
+        }
+
+        seqlock_init(&clock->lock);
+
+        mlx5_timecounter_init(mdev);
+        mlx5_init_clock_info(mdev);
+        mlx5_init_overflow_period(clock);
         INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
-        INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
-        if (clock->overflow_period)
-                schedule_delayed_work(&clock->overflow_work, 0);
-        else
-                mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
 
         /* Configure the PHC */
         clock->ptp_info = mlx5_ptp_clock_info;
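The init path is now a thin sequencer, and the ordering is load-bearing: mlx5_timecounter_init() sets up the cyclecounter and timecounter, mlx5_init_clock_info() allocates and fills the shared page from that state, and mlx5_init_overflow_period() runs last because it publishes overflow_period into mdev->clock_info. The period math caps overflow_cycles at the smaller of 2^63 / mult (so cycles * mult cannot overflow 64 bits) and a third of the 41-bit mask (so the counter is sampled well more than twice per wrap). A standalone rework of that arithmetic (the 156.25 MHz frequency, shift of 23, and HZ of 250 are all assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define CYCLES_SHIFT    23U             /* assumed MLX5_CYCLES_SHIFT */
#define HZ_ASSUMED      250ULL          /* assumed CONFIG_HZ         */
#define NSEC_PER_SEC    1000000000ULL

int main(void)
{
        uint64_t mask = (1ULL << 41) - 1;               /* 41-bit counter     */
        uint32_t mult = (uint32_t)((NSEC_PER_SEC << CYCLES_SHIFT) /
                                   156250000ULL);       /* assumed 156.25 MHz */

        /* Keep (cycles * mult) inside 63 bits, and sample the counter
         * at least ~3 times per wraparound. */
        uint64_t overflow_cycles = (~0ULL >> 1) / mult;
        if (overflow_cycles > mask / 3)
                overflow_cycles = mask / 3;

        uint64_t ns = (overflow_cycles * mult) >> CYCLES_SHIFT;
        uint64_t period = ns / (NSEC_PER_SEC / HZ_ASSUMED);

        printf("watchdog period: %llu jiffies (~%llu s)\n",
               (unsigned long long)period,
               (unsigned long long)(ns / NSEC_PER_SEC));
        return 0;
}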
@@ -684,7 +734,7 @@
         }
 
         cancel_work_sync(&clock->pps_info.out_work);
-        cancel_delayed_work_sync(&clock->overflow_work);
+        cancel_delayed_work_sync(&clock->timer.overflow_work);
 
         if (mdev->clock_info) {
                 free_page((unsigned long)mdev->clock_info);