forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
....@@ -32,7 +32,9 @@
3232
3333 #include <linux/clocksource.h>
3434 #include <linux/highmem.h>
35
+#include <linux/ptp_clock_kernel.h>
3536 #include <rdma/mlx5-abi.h>
37
+#include "lib/eq.h"
3638 #include "en.h"
3739 #include "clock.h"
3840
....@@ -65,19 +67,41 @@
6567 MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
6668 };
6769
70
+static u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
71
+ struct ptp_system_timestamp *sts)
72
+{
73
+ u32 timer_h, timer_h1, timer_l;
74
+
75
+ timer_h = ioread32be(&dev->iseg->internal_timer_h);
76
+ ptp_read_system_prets(sts);
77
+ timer_l = ioread32be(&dev->iseg->internal_timer_l);
78
+ ptp_read_system_postts(sts);
79
+ timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
80
+ if (timer_h != timer_h1) {
81
+ /* wrap around */
82
+ ptp_read_system_prets(sts);
83
+ timer_l = ioread32be(&dev->iseg->internal_timer_l);
84
+ ptp_read_system_postts(sts);
85
+ }
86
+
87
+ return (u64)timer_l | (u64)timer_h1 << 32;
88
+}
89
+
6890 static u64 read_internal_timer(const struct cyclecounter *cc)
6991 {
70
- struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
92
+ struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
93
+ struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
7194 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
7295 clock);
7396
74
- return mlx5_read_internal_timer(mdev) & cc->mask;
97
+ return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
7598 }
7699
77100 static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
78101 {
79102 struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
80103 struct mlx5_clock *clock = &mdev->clock;
104
+ struct mlx5_timer *timer;
81105 u32 sign;
82106
83107 if (!clock_info)
....@@ -87,10 +111,11 @@
87111 smp_store_mb(clock_info->sign,
88112 sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
89113
90
- clock_info->cycles = clock->tc.cycle_last;
91
- clock_info->mult = clock->cycles.mult;
92
- clock_info->nsec = clock->tc.nsec;
93
- clock_info->frac = clock->tc.frac;
114
+ timer = &clock->timer;
115
+ clock_info->cycles = timer->tc.cycle_last;
116
+ clock_info->mult = timer->cycles.mult;
117
+ clock_info->nsec = timer->tc.nsec;
118
+ clock_info->frac = timer->tc.frac;
94119
95120 smp_store_release(&clock_info->sign,
96121 sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
....@@ -111,10 +136,10 @@
111136 for (i = 0; i < clock->ptp_info.n_pins; i++) {
112137 u64 tstart;
113138
114
- write_lock_irqsave(&clock->lock, flags);
139
+ write_seqlock_irqsave(&clock->lock, flags);
115140 tstart = clock->pps_info.start[i];
116141 clock->pps_info.start[i] = 0;
117
- write_unlock_irqrestore(&clock->lock, flags);
142
+ write_sequnlock_irqrestore(&clock->lock, flags);
118143 if (!tstart)
119144 continue;
120145
....@@ -128,43 +153,58 @@
128153 static void mlx5_timestamp_overflow(struct work_struct *work)
129154 {
130155 struct delayed_work *dwork = to_delayed_work(work);
131
- struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
132
- overflow_work);
156
+ struct mlx5_core_dev *mdev;
157
+ struct mlx5_timer *timer;
158
+ struct mlx5_clock *clock;
133159 unsigned long flags;
134160
135
- write_lock_irqsave(&clock->lock, flags);
136
- timecounter_read(&clock->tc);
137
- mlx5_update_clock_info_page(clock->mdev);
138
- write_unlock_irqrestore(&clock->lock, flags);
139
- schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
161
+ timer = container_of(dwork, struct mlx5_timer, overflow_work);
162
+ clock = container_of(timer, struct mlx5_clock, timer);
163
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
164
+
165
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
166
+ goto out;
167
+
168
+ write_seqlock_irqsave(&clock->lock, flags);
169
+ timecounter_read(&timer->tc);
170
+ mlx5_update_clock_info_page(mdev);
171
+ write_sequnlock_irqrestore(&clock->lock, flags);
172
+
173
+out:
174
+ schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
140175 }
141176
142
-static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
143
- const struct timespec64 *ts)
177
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
144178 {
145
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
146
- ptp_info);
179
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
180
+ struct mlx5_timer *timer = &clock->timer;
147181 u64 ns = timespec64_to_ns(ts);
182
+ struct mlx5_core_dev *mdev;
148183 unsigned long flags;
149184
150
- write_lock_irqsave(&clock->lock, flags);
151
- timecounter_init(&clock->tc, &clock->cycles, ns);
152
- mlx5_update_clock_info_page(clock->mdev);
153
- write_unlock_irqrestore(&clock->lock, flags);
185
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
186
+ write_seqlock_irqsave(&clock->lock, flags);
187
+ timecounter_init(&timer->tc, &timer->cycles, ns);
188
+ mlx5_update_clock_info_page(mdev);
189
+ write_sequnlock_irqrestore(&clock->lock, flags);
154190
155191 return 0;
156192 }
157193
158
-static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
194
+static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
195
+ struct ptp_system_timestamp *sts)
159196 {
160
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
161
- ptp_info);
162
- u64 ns;
197
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
198
+ struct mlx5_timer *timer = &clock->timer;
199
+ struct mlx5_core_dev *mdev;
163200 unsigned long flags;
201
+ u64 cycles, ns;
164202
165
- write_lock_irqsave(&clock->lock, flags);
166
- ns = timecounter_read(&clock->tc);
167
- write_unlock_irqrestore(&clock->lock, flags);
203
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
204
+ write_seqlock_irqsave(&clock->lock, flags);
205
+ cycles = mlx5_read_internal_timer(mdev, sts);
206
+ ns = timecounter_cyc2time(&timer->tc, cycles);
207
+ write_sequnlock_irqrestore(&clock->lock, flags);
168208
169209 *ts = ns_to_timespec64(ns);
170210
....@@ -173,42 +213,46 @@
173213
174214 static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
175215 {
176
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
177
- ptp_info);
216
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
217
+ struct mlx5_timer *timer = &clock->timer;
218
+ struct mlx5_core_dev *mdev;
178219 unsigned long flags;
179220
180
- write_lock_irqsave(&clock->lock, flags);
181
- timecounter_adjtime(&clock->tc, delta);
182
- mlx5_update_clock_info_page(clock->mdev);
183
- write_unlock_irqrestore(&clock->lock, flags);
221
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
222
+ write_seqlock_irqsave(&clock->lock, flags);
223
+ timecounter_adjtime(&timer->tc, delta);
224
+ mlx5_update_clock_info_page(mdev);
225
+ write_sequnlock_irqrestore(&clock->lock, flags);
184226
185227 return 0;
186228 }
187229
188230 static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
189231 {
190
- u64 adj;
191
- u32 diff;
232
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
233
+ struct mlx5_timer *timer = &clock->timer;
234
+ struct mlx5_core_dev *mdev;
192235 unsigned long flags;
193236 int neg_adj = 0;
194
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
195
- ptp_info);
237
+ u32 diff;
238
+ u64 adj;
196239
197240 if (delta < 0) {
198241 neg_adj = 1;
199242 delta = -delta;
200243 }
201244
202
- adj = clock->nominal_c_mult;
245
+ adj = timer->nominal_c_mult;
203246 adj *= delta;
204247 diff = div_u64(adj, 1000000000ULL);
205248
206
- write_lock_irqsave(&clock->lock, flags);
207
- timecounter_read(&clock->tc);
208
- clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
209
- clock->nominal_c_mult + diff;
210
- mlx5_update_clock_info_page(clock->mdev);
211
- write_unlock_irqrestore(&clock->lock, flags);
249
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
250
+ write_seqlock_irqsave(&clock->lock, flags);
251
+ timecounter_read(&timer->tc);
252
+ timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff :
253
+ timer->nominal_c_mult + diff;
254
+ mlx5_update_clock_info_page(mdev);
255
+ write_sequnlock_irqrestore(&clock->lock, flags);
212256
213257 return 0;
214258 }
....@@ -231,20 +275,33 @@
231275 if (!MLX5_PPS_CAP(mdev))
232276 return -EOPNOTSUPP;
233277
278
+ /* Reject requests with unsupported flags */
279
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
280
+ PTP_RISING_EDGE |
281
+ PTP_FALLING_EDGE |
282
+ PTP_STRICT_FLAGS))
283
+ return -EOPNOTSUPP;
284
+
285
+ /* Reject requests to enable time stamping on both edges. */
286
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
287
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
288
+ (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
289
+ return -EOPNOTSUPP;
290
+
234291 if (rq->extts.index >= clock->ptp_info.n_pins)
235292 return -EINVAL;
236293
294
+ pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
295
+ if (pin < 0)
296
+ return -EBUSY;
297
+
237298 if (on) {
238
- pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
239
- if (pin < 0)
240
- return -EBUSY;
241299 pin_mode = MLX5_PIN_MODE_IN;
242300 pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
243301 field_select = MLX5_MTPPS_FS_PIN_MODE |
244302 MLX5_MTPPS_FS_PATTERN |
245303 MLX5_MTPPS_FS_ENABLE;
246304 } else {
247
- pin = rq->extts.index;
248305 field_select = MLX5_MTPPS_FS_ENABLE;
249306 }
250307
....@@ -270,6 +327,7 @@
270327 container_of(ptp, struct mlx5_clock, ptp_info);
271328 struct mlx5_core_dev *mdev =
272329 container_of(clock, struct mlx5_core_dev, clock);
330
+ struct mlx5_timer *timer = &clock->timer;
273331 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
274332 u64 nsec_now, nsec_delta, time_stamp = 0;
275333 u64 cycles_now, cycles_delta;
....@@ -285,15 +343,19 @@
285343 if (!MLX5_PPS_CAP(mdev))
286344 return -EOPNOTSUPP;
287345
346
+ /* Reject requests with unsupported flags */
347
+ if (rq->perout.flags)
348
+ return -EOPNOTSUPP;
349
+
288350 if (rq->perout.index >= clock->ptp_info.n_pins)
289351 return -EINVAL;
290352
291
- if (on) {
292
- pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
293
- rq->perout.index);
294
- if (pin < 0)
295
- return -EBUSY;
353
+ pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
354
+ rq->perout.index);
355
+ if (pin < 0)
356
+ return -EBUSY;
296357
358
+ if (on) {
297359 pin_mode = MLX5_PIN_MODE_OUT;
298360 pattern = MLX5_OUT_PATTERN_PERIODIC;
299361 ts.tv_sec = rq->perout.period.sec;
....@@ -306,20 +368,19 @@
306368 ts.tv_sec = rq->perout.start.sec;
307369 ts.tv_nsec = rq->perout.start.nsec;
308370 ns = timespec64_to_ns(&ts);
309
- cycles_now = mlx5_read_internal_timer(mdev);
310
- write_lock_irqsave(&clock->lock, flags);
311
- nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
371
+ cycles_now = mlx5_read_internal_timer(mdev, NULL);
372
+ write_seqlock_irqsave(&clock->lock, flags);
373
+ nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
312374 nsec_delta = ns - nsec_now;
313
- cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
314
- clock->cycles.mult);
315
- write_unlock_irqrestore(&clock->lock, flags);
375
+ cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
376
+ timer->cycles.mult);
377
+ write_sequnlock_irqrestore(&clock->lock, flags);
316378 time_stamp = cycles_now + cycles_delta;
317379 field_select = MLX5_MTPPS_FS_PIN_MODE |
318380 MLX5_MTPPS_FS_PATTERN |
319381 MLX5_MTPPS_FS_ENABLE |
320382 MLX5_MTPPS_FS_TIME_STAMP;
321383 } else {
322
- pin = rq->perout.index;
323384 field_select = MLX5_MTPPS_FS_ENABLE;
324385 }
325386
....@@ -389,14 +450,12 @@
389450 default:
390451 return -EOPNOTSUPP;
391452 }
392
-
393
- return -EOPNOTSUPP;
394453 }
395454
396455 static const struct ptp_clock_info mlx5_ptp_clock_info = {
397456 .owner = THIS_MODULE,
398
- .name = "mlx5_p2p",
399
- .max_adj = 100000000,
457
+ .name = "mlx5_ptp",
458
+ .max_adj = 50000000,
400459 .n_alarm = 0,
401460 .n_ext_ts = 0,
402461 .n_per_out = 0,
....@@ -404,11 +463,44 @@
404463 .pps = 0,
405464 .adjfreq = mlx5_ptp_adjfreq,
406465 .adjtime = mlx5_ptp_adjtime,
407
- .gettime64 = mlx5_ptp_gettime,
466
+ .gettimex64 = mlx5_ptp_gettimex,
408467 .settime64 = mlx5_ptp_settime,
409468 .enable = NULL,
410469 .verify = NULL,
411470 };
471
+
472
+static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
473
+ u32 *mtpps, u32 mtpps_size)
474
+{
475
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
476
+
477
+ MLX5_SET(mtpps_reg, in, pin, pin);
478
+
479
+ return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
480
+ mtpps_size, MLX5_REG_MTPPS, 0, 0);
481
+}
482
+
483
+static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
484
+{
485
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
486
+
487
+ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
488
+ u8 mode;
489
+ int err;
490
+
491
+ err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
492
+ if (err || !MLX5_GET(mtpps_reg, out, enable))
493
+ return PTP_PF_NONE;
494
+
495
+ mode = MLX5_GET(mtpps_reg, out, pin_mode);
496
+
497
+ if (mode == MLX5_PIN_MODE_IN)
498
+ return PTP_PF_EXTTS;
499
+ else if (mode == MLX5_PIN_MODE_OUT)
500
+ return PTP_PF_PEROUT;
501
+
502
+ return PTP_PF_NONE;
503
+}
412504
413505 static int mlx5_init_pin_config(struct mlx5_clock *clock)
414506 {
....@@ -429,8 +521,8 @@
429521 sizeof(clock->ptp_info.pin_config[i].name),
430522 "mlx5_pps%d", i);
431523 clock->ptp_info.pin_config[i].index = i;
432
- clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
433
- clock->ptp_info.pin_config[i].chan = i;
524
+ clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
525
+ clock->ptp_info.pin_config[i].chan = 0;
434526 }
435527
436528 return 0;
....@@ -460,17 +552,21 @@
460552 clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
461553 }
462554
463
-void mlx5_pps_event(struct mlx5_core_dev *mdev,
464
- struct mlx5_eqe *eqe)
555
+static int mlx5_pps_event(struct notifier_block *nb,
556
+ unsigned long type, void *data)
465557 {
466
- struct mlx5_clock *clock = &mdev->clock;
558
+ struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
559
+ struct mlx5_timer *timer = &clock->timer;
467560 struct ptp_clock_event ptp_event;
468
- struct timespec64 ts;
469
- u64 nsec_now, nsec_delta;
470561 u64 cycles_now, cycles_delta;
562
+ u64 nsec_now, nsec_delta, ns;
563
+ struct mlx5_eqe *eqe = data;
471564 int pin = eqe->data.pps.pin;
472
- s64 ns;
565
+ struct mlx5_core_dev *mdev;
566
+ struct timespec64 ts;
473567 unsigned long flags;
568
+
569
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
474570
475571 switch (clock->ptp_info.pin_config[pin].func) {
476572 case PTP_PF_EXTTS:
....@@ -485,52 +581,58 @@
485581 } else {
486582 ptp_event.type = PTP_CLOCK_EXTTS;
487583 }
584
+ /* TODO: clock->ptp can be NULL if ptp_clock_register fails */
488585 ptp_clock_event(clock->ptp, &ptp_event);
489586 break;
490587 case PTP_PF_PEROUT:
491
- mlx5_ptp_gettime(&clock->ptp_info, &ts);
492
- cycles_now = mlx5_read_internal_timer(mdev);
588
+ mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
589
+ cycles_now = mlx5_read_internal_timer(mdev, NULL);
493590 ts.tv_sec += 1;
494591 ts.tv_nsec = 0;
495592 ns = timespec64_to_ns(&ts);
496
- write_lock_irqsave(&clock->lock, flags);
497
- nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
593
+ write_seqlock_irqsave(&clock->lock, flags);
594
+ nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
498595 nsec_delta = ns - nsec_now;
499
- cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
500
- clock->cycles.mult);
596
+ cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
597
+ timer->cycles.mult);
501598 clock->pps_info.start[pin] = cycles_now + cycles_delta;
599
+ write_sequnlock_irqrestore(&clock->lock, flags);
502600 schedule_work(&clock->pps_info.out_work);
503
- write_unlock_irqrestore(&clock->lock, flags);
504601 break;
505602 default:
506
- mlx5_core_err(mdev, " Unhandled event\n");
603
+ mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
604
+ clock->ptp_info.pin_config[pin].func);
507605 }
606
+
607
+ return NOTIFY_OK;
508608 }
509609
510
-void mlx5_init_clock(struct mlx5_core_dev *mdev)
610
+static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
511611 {
512612 struct mlx5_clock *clock = &mdev->clock;
513
- u64 overflow_cycles;
514
- u64 ns;
515
- u64 frac = 0;
613
+ struct mlx5_timer *timer = &clock->timer;
516614 u32 dev_freq;
517615
518616 dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
519
- if (!dev_freq) {
520
- mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
521
- return;
522
- }
523
- rwlock_init(&clock->lock);
524
- clock->cycles.read = read_internal_timer;
525
- clock->cycles.shift = MLX5_CYCLES_SHIFT;
526
- clock->cycles.mult = clocksource_khz2mult(dev_freq,
527
- clock->cycles.shift);
528
- clock->nominal_c_mult = clock->cycles.mult;
529
- clock->cycles.mask = CLOCKSOURCE_MASK(41);
530
- clock->mdev = mdev;
617
+ timer->cycles.read = read_internal_timer;
618
+ timer->cycles.shift = MLX5_CYCLES_SHIFT;
619
+ timer->cycles.mult = clocksource_khz2mult(dev_freq,
620
+ timer->cycles.shift);
621
+ timer->nominal_c_mult = timer->cycles.mult;
622
+ timer->cycles.mask = CLOCKSOURCE_MASK(41);
531623
532
- timecounter_init(&clock->tc, &clock->cycles,
624
+ timecounter_init(&timer->tc, &timer->cycles,
533625 ktime_to_ns(ktime_get_real()));
626
+}
627
+
628
+static void mlx5_init_overflow_period(struct mlx5_clock *clock)
629
+{
630
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
631
+ struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
632
+ struct mlx5_timer *timer = &clock->timer;
633
+ u64 overflow_cycles;
634
+ u64 frac = 0;
635
+ u64 ns;
534636
535637 /* Calculate period in seconds to call the overflow watchdog - to make
536638 * sure counter is checked at least twice every wrap around.
....@@ -539,39 +641,63 @@
539641 * multiplied by clock multiplier where the result doesn't exceed
540642 * 64bits.
541643 */
542
- overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
543
- overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
644
+ overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
645
+ overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));
544646
545
- ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
647
+ ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
546648 frac, &frac);
547649 do_div(ns, NSEC_PER_SEC / HZ);
548
- clock->overflow_period = ns;
650
+ timer->overflow_period = ns;
549651
550
- mdev->clock_info_page = alloc_page(GFP_KERNEL);
551
- if (mdev->clock_info_page) {
552
- mdev->clock_info = kmap(mdev->clock_info_page);
553
- if (!mdev->clock_info) {
554
- __free_page(mdev->clock_info_page);
555
- mlx5_core_warn(mdev, "failed to map clock page\n");
556
- } else {
557
- mdev->clock_info->sign = 0;
558
- mdev->clock_info->nsec = clock->tc.nsec;
559
- mdev->clock_info->cycles = clock->tc.cycle_last;
560
- mdev->clock_info->mask = clock->cycles.mask;
561
- mdev->clock_info->mult = clock->nominal_c_mult;
562
- mdev->clock_info->shift = clock->cycles.shift;
563
- mdev->clock_info->frac = clock->tc.frac;
564
- mdev->clock_info->overflow_period =
565
- clock->overflow_period;
566
- }
652
+ INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
653
+ if (timer->overflow_period)
654
+ schedule_delayed_work(&timer->overflow_work, 0);
655
+ else
656
+ mlx5_core_warn(mdev,
657
+ "invalid overflow period, overflow_work is not scheduled\n");
658
+
659
+ if (clock_info)
660
+ clock_info->overflow_period = timer->overflow_period;
661
+}
662
+
663
+static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
664
+{
665
+ struct mlx5_clock *clock = &mdev->clock;
666
+ struct mlx5_ib_clock_info *info;
667
+ struct mlx5_timer *timer;
668
+
669
+ mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
670
+ if (!mdev->clock_info) {
671
+ mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
672
+ return;
567673 }
568674
675
+ info = mdev->clock_info;
676
+ timer = &clock->timer;
677
+
678
+ info->nsec = timer->tc.nsec;
679
+ info->cycles = timer->tc.cycle_last;
680
+ info->mask = timer->cycles.mask;
681
+ info->mult = timer->nominal_c_mult;
682
+ info->shift = timer->cycles.shift;
683
+ info->frac = timer->tc.frac;
684
+}
685
+
686
+void mlx5_init_clock(struct mlx5_core_dev *mdev)
687
+{
688
+ struct mlx5_clock *clock = &mdev->clock;
689
+
690
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
691
+ mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
692
+ return;
693
+ }
694
+
695
+ seqlock_init(&clock->lock);
696
+
697
+ mlx5_timecounter_init(mdev);
698
+ mlx5_init_clock_info(mdev);
699
+ mlx5_init_overflow_period(clock);
569700 INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
570
- INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
571
- if (clock->overflow_period)
572
- schedule_delayed_work(&clock->overflow_work, 0);
573
- else
574
- mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
575701
576702 /* Configure the PHC */
577703 clock->ptp_info = mlx5_ptp_clock_info;
....@@ -589,6 +715,9 @@
589715 PTR_ERR(clock->ptp));
590716 clock->ptp = NULL;
591717 }
718
+
719
+ MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
720
+ mlx5_eq_notifier_register(mdev, &clock->pps_nb);
592721 }
593722
594723 void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
....@@ -598,17 +727,17 @@
598727 if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
599728 return;
600729
730
+ mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
601731 if (clock->ptp) {
602732 ptp_clock_unregister(clock->ptp);
603733 clock->ptp = NULL;
604734 }
605735
606736 cancel_work_sync(&clock->pps_info.out_work);
607
- cancel_delayed_work_sync(&clock->overflow_work);
737
+ cancel_delayed_work_sync(&clock->timer.overflow_work);
608738
609739 if (mdev->clock_info) {
610
- kunmap(mdev->clock_info_page);
611
- __free_page(mdev->clock_info_page);
740
+ free_page((unsigned long)mdev->clock_info);
612741 mdev->clock_info = NULL;
613742 }
614743