forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -32,7 +32,9 @@
 
 #include <linux/clocksource.h>
 #include <linux/highmem.h>
+#include <linux/ptp_clock_kernel.h>
 #include <rdma/mlx5-abi.h>
+#include "lib/eq.h"
 #include "en.h"
 #include "clock.h"
 
@@ -65,13 +67,33 @@
         MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
 };
 
+static u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
+                                    struct ptp_system_timestamp *sts)
+{
+        u32 timer_h, timer_h1, timer_l;
+
+        timer_h = ioread32be(&dev->iseg->internal_timer_h);
+        ptp_read_system_prets(sts);
+        timer_l = ioread32be(&dev->iseg->internal_timer_l);
+        ptp_read_system_postts(sts);
+        timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
+        if (timer_h != timer_h1) {
+                /* wrap around */
+                ptp_read_system_prets(sts);
+                timer_l = ioread32be(&dev->iseg->internal_timer_l);
+                ptp_read_system_postts(sts);
+        }
+
+        return (u64)timer_l | (u64)timer_h1 << 32;
+}
+
 static u64 read_internal_timer(const struct cyclecounter *cc)
 {
         struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
         struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
                                                    clock);
 
-        return mlx5_read_internal_timer(mdev) & cc->mask;
+        return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
 }
 
 static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
@@ -111,10 +133,10 @@
         for (i = 0; i < clock->ptp_info.n_pins; i++) {
                 u64 tstart;
 
-                write_lock_irqsave(&clock->lock, flags);
+                write_seqlock_irqsave(&clock->lock, flags);
                 tstart = clock->pps_info.start[i];
                 clock->pps_info.start[i] = 0;
-                write_unlock_irqrestore(&clock->lock, flags);
+                write_sequnlock_irqrestore(&clock->lock, flags);
                 if (!tstart)
                         continue;
 
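Note on the rwlock-to-seqlock conversion above and in the following hunks: the patch only touches the writer side (write_seqlock_irqsave/write_sequnlock_irqrestore); the point of a seqlock is that readers become lockless retry loops. A minimal reader sketch against the same clock->lock is shown below; the helper name is hypothetical and not part of this patch.

    /* Sketch: lockless cycles-to-ns conversion; redo if a writer raced us. */
    static u64 mlx5_clock_cyc2time(struct mlx5_clock *clock, u64 cycles)
    {
            unsigned int seq;
            u64 nsec;

            do {
                    seq = read_seqbegin(&clock->lock);
                    nsec = timecounter_cyc2time(&clock->tc, cycles);
            } while (read_seqretry(&clock->lock, seq));

            return nsec;
    }

Readers never block writers and simply repeat the conversion if the sequence count changed, which is the usual motivation for moving a hot timestamp-conversion path off an rwlock.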
@@ -128,43 +150,48 @@
 static void mlx5_timestamp_overflow(struct work_struct *work)
 {
         struct delayed_work *dwork = to_delayed_work(work);
-        struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
-                                                overflow_work);
+        struct mlx5_core_dev *mdev;
+        struct mlx5_clock *clock;
         unsigned long flags;
 
-        write_lock_irqsave(&clock->lock, flags);
+        clock = container_of(dwork, struct mlx5_clock, overflow_work);
+        mdev = container_of(clock, struct mlx5_core_dev, clock);
+        write_seqlock_irqsave(&clock->lock, flags);
         timecounter_read(&clock->tc);
-        mlx5_update_clock_info_page(clock->mdev);
-        write_unlock_irqrestore(&clock->lock, flags);
+        mlx5_update_clock_info_page(mdev);
+        write_sequnlock_irqrestore(&clock->lock, flags);
         schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
 }
 
-static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
-                            const struct timespec64 *ts)
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
 {
-        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
-                                                ptp_info);
+        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
         u64 ns = timespec64_to_ns(ts);
+        struct mlx5_core_dev *mdev;
         unsigned long flags;
 
-        write_lock_irqsave(&clock->lock, flags);
+        mdev = container_of(clock, struct mlx5_core_dev, clock);
+        write_seqlock_irqsave(&clock->lock, flags);
         timecounter_init(&clock->tc, &clock->cycles, ns);
-        mlx5_update_clock_info_page(clock->mdev);
-        write_unlock_irqrestore(&clock->lock, flags);
+        mlx5_update_clock_info_page(mdev);
+        write_sequnlock_irqrestore(&clock->lock, flags);
 
         return 0;
 }
 
-static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+                             struct ptp_system_timestamp *sts)
 {
-        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
-                                                ptp_info);
-        u64 ns;
+        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+        struct mlx5_core_dev *mdev;
         unsigned long flags;
+        u64 cycles, ns;
 
-        write_lock_irqsave(&clock->lock, flags);
-        ns = timecounter_read(&clock->tc);
-        write_unlock_irqrestore(&clock->lock, flags);
+        mdev = container_of(clock, struct mlx5_core_dev, clock);
+        write_seqlock_irqsave(&clock->lock, flags);
+        cycles = mlx5_read_internal_timer(mdev, sts);
+        ns = timecounter_cyc2time(&clock->tc, cycles);
+        write_sequnlock_irqrestore(&clock->lock, flags);
 
         *ts = ns_to_timespec64(ns);
 
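Switching .gettime64 to .gettimex64 lets mlx5_ptp_gettimex() capture system-clock snapshots (ptp_read_system_prets/postts) immediately around the device register read; the PTP core exposes these to user space through the PTP_SYS_OFFSET_EXTENDED ioctl. A user-space sketch of reading back such (sys, phc, sys) triplets follows; the /dev/ptp0 path is an assumption and error handling is minimal.

    /* Query (sys_before, phc, sys_after) samples from a PHC whose driver
     * implements gettimex64; needs linux/ptp_clock.h from >= v4.20 headers.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    int main(void)
    {
            struct ptp_sys_offset_extended req = { .n_samples = 5 };
            int fd = open("/dev/ptp0", O_RDWR);     /* assumed device node */
            unsigned int i;

            if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_EXTENDED, &req) < 0)
                    return 1;

            for (i = 0; i < req.n_samples; i++)
                    printf("sys %lld.%09u  phc %lld.%09u  sys %lld.%09u\n",
                           (long long)req.ts[i][0].sec, req.ts[i][0].nsec,
                           (long long)req.ts[i][1].sec, req.ts[i][1].nsec,
                           (long long)req.ts[i][2].sec, req.ts[i][2].nsec);
            close(fd);
            return 0;
    }

Tools such as linuxptp's phc2sys can use these tighter sys/phc/sys windows to improve PHC-to-system synchronization, which is the practical payoff of the sts plumbing added in this hunk.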
@@ -173,26 +200,28 @@
 
 static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
-        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
-                                                ptp_info);
+        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+        struct mlx5_core_dev *mdev;
         unsigned long flags;
 
-        write_lock_irqsave(&clock->lock, flags);
+        mdev = container_of(clock, struct mlx5_core_dev, clock);
+        write_seqlock_irqsave(&clock->lock, flags);
         timecounter_adjtime(&clock->tc, delta);
-        mlx5_update_clock_info_page(clock->mdev);
-        write_unlock_irqrestore(&clock->lock, flags);
+        mlx5_update_clock_info_page(mdev);
+        write_sequnlock_irqrestore(&clock->lock, flags);
 
         return 0;
 }
 
 static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 {
-        u64 adj;
-        u32 diff;
+        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+        struct mlx5_core_dev *mdev;
         unsigned long flags;
         int neg_adj = 0;
-        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
-                                                ptp_info);
+        u32 diff;
+        u64 adj;
+
 
         if (delta < 0) {
                 neg_adj = 1;
@@ -203,12 +232,13 @@
         adj *= delta;
         diff = div_u64(adj, 1000000000ULL);
 
-        write_lock_irqsave(&clock->lock, flags);
+        mdev = container_of(clock, struct mlx5_core_dev, clock);
+        write_seqlock_irqsave(&clock->lock, flags);
         timecounter_read(&clock->tc);
         clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
                                        clock->nominal_c_mult + diff;
-        mlx5_update_clock_info_page(clock->mdev);
-        write_unlock_irqrestore(&clock->lock, flags);
+        mlx5_update_clock_info_page(mdev);
+        write_sequnlock_irqrestore(&clock->lock, flags);
 
         return 0;
 }
@@ -231,20 +261,33 @@
         if (!MLX5_PPS_CAP(mdev))
                 return -EOPNOTSUPP;
 
+        /* Reject requests with unsupported flags */
+        if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+                                PTP_RISING_EDGE |
+                                PTP_FALLING_EDGE |
+                                PTP_STRICT_FLAGS))
+                return -EOPNOTSUPP;
+
+        /* Reject requests to enable time stamping on both edges. */
+        if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+            (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+            (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+                return -EOPNOTSUPP;
+
         if (rq->extts.index >= clock->ptp_info.n_pins)
                 return -EINVAL;
 
+        pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
+        if (pin < 0)
+                return -EBUSY;
+
         if (on) {
-                pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
-                if (pin < 0)
-                        return -EBUSY;
                 pin_mode = MLX5_PIN_MODE_IN;
                 pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
                 field_select = MLX5_MTPPS_FS_PIN_MODE |
                                MLX5_MTPPS_FS_PATTERN |
                                MLX5_MTPPS_FS_ENABLE;
         } else {
-                pin = rq->extts.index;
                 field_select = MLX5_MTPPS_FS_ENABLE;
         }
 
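The added checks mirror the PTP extts UAPI: a request may pick PTP_RISING_EDGE or PTP_FALLING_EDGE (plus PTP_STRICT_FLAGS, which newer kernels set for requests arriving via PTP_EXTTS_REQUEST2), while asking for both edges at once is rejected, as the comment notes. A user-space sketch of a request this code accepts is shown below; the device path and pin index are assumptions, and the pin must already carry the PTP_PF_EXTTS function (e.g. assigned via PTP_PIN_SETFUNC).

    /* Sketch: arm external timestamping on pin 0, rising edge only. */
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    static int enable_extts(const char *dev)        /* e.g. "/dev/ptp0" (assumption) */
    {
            struct ptp_extts_request req;
            int fd = open(dev, O_RDWR);

            if (fd < 0)
                    return -1;

            memset(&req, 0, sizeof(req));
            req.index = 0;                          /* pin "mlx5_pps0" (assumption) */
            req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

            if (ioctl(fd, PTP_EXTTS_REQUEST, &req) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;      /* read() now returns struct ptp_extts_event records */
    }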
@@ -285,15 +328,19 @@
         if (!MLX5_PPS_CAP(mdev))
                 return -EOPNOTSUPP;
 
+        /* Reject requests with unsupported flags */
+        if (rq->perout.flags)
+                return -EOPNOTSUPP;
+
         if (rq->perout.index >= clock->ptp_info.n_pins)
                 return -EINVAL;
 
-        if (on) {
-                pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
-                                   rq->perout.index);
-                if (pin < 0)
-                        return -EBUSY;
+        pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
+                           rq->perout.index);
+        if (pin < 0)
+                return -EBUSY;
 
+        if (on) {
                 pin_mode = MLX5_PIN_MODE_OUT;
                 pattern = MLX5_OUT_PATTERN_PERIODIC;
                 ts.tv_sec = rq->perout.period.sec;
@@ -306,20 +353,19 @@
                 ts.tv_sec = rq->perout.start.sec;
                 ts.tv_nsec = rq->perout.start.nsec;
                 ns = timespec64_to_ns(&ts);
-                cycles_now = mlx5_read_internal_timer(mdev);
-                write_lock_irqsave(&clock->lock, flags);
+                cycles_now = mlx5_read_internal_timer(mdev, NULL);
+                write_seqlock_irqsave(&clock->lock, flags);
                 nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
                 nsec_delta = ns - nsec_now;
                 cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
                                          clock->cycles.mult);
-                write_unlock_irqrestore(&clock->lock, flags);
+                write_sequnlock_irqrestore(&clock->lock, flags);
                 time_stamp = cycles_now + cycles_delta;
                 field_select = MLX5_MTPPS_FS_PIN_MODE |
                                MLX5_MTPPS_FS_PATTERN |
                                MLX5_MTPPS_FS_ENABLE |
                                MLX5_MTPPS_FS_TIME_STAMP;
         } else {
-                pin = rq->perout.index;
                 field_select = MLX5_MTPPS_FS_ENABLE;
         }
 
@@ -389,13 +435,11 @@
         default:
                 return -EOPNOTSUPP;
         }
-
-        return -EOPNOTSUPP;
 }
 
 static const struct ptp_clock_info mlx5_ptp_clock_info = {
         .owner = THIS_MODULE,
-        .name = "mlx5_p2p",
+        .name = "mlx5_ptp",
         .max_adj = 100000000,
         .n_alarm = 0,
         .n_ext_ts = 0,
@@ -404,11 +448,44 @@
         .pps = 0,
         .adjfreq = mlx5_ptp_adjfreq,
         .adjtime = mlx5_ptp_adjtime,
-        .gettime64 = mlx5_ptp_gettime,
+        .gettimex64 = mlx5_ptp_gettimex,
         .settime64 = mlx5_ptp_settime,
         .enable = NULL,
         .verify = NULL,
 };
+
+static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
+                                     u32 *mtpps, u32 mtpps_size)
+{
+        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+
+        MLX5_SET(mtpps_reg, in, pin, pin);
+
+        return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
+                                    mtpps_size, MLX5_REG_MTPPS, 0, 0);
+}
+
+static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
+{
+        struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+        u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+        u8 mode;
+        int err;
+
+        err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
+        if (err || !MLX5_GET(mtpps_reg, out, enable))
+                return PTP_PF_NONE;
+
+        mode = MLX5_GET(mtpps_reg, out, pin_mode);
+
+        if (mode == MLX5_PIN_MODE_IN)
+                return PTP_PF_EXTTS;
+        else if (mode == MLX5_PIN_MODE_OUT)
+                return PTP_PF_PEROUT;
+
+        return PTP_PF_NONE;
+}
 
 static int mlx5_init_pin_config(struct mlx5_clock *clock)
 {
@@ -429,8 +506,8 @@
                          sizeof(clock->ptp_info.pin_config[i].name),
                          "mlx5_pps%d", i);
                 clock->ptp_info.pin_config[i].index = i;
-                clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
-                clock->ptp_info.pin_config[i].chan = i;
+                clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
+                clock->ptp_info.pin_config[i].chan = 0;
         }
 
         return 0;
@@ -460,17 +537,20 @@
         clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
 }
 
-void mlx5_pps_event(struct mlx5_core_dev *mdev,
-                    struct mlx5_eqe *eqe)
+static int mlx5_pps_event(struct notifier_block *nb,
+                          unsigned long type, void *data)
 {
-        struct mlx5_clock *clock = &mdev->clock;
+        struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
         struct ptp_clock_event ptp_event;
-        struct timespec64 ts;
-        u64 nsec_now, nsec_delta;
         u64 cycles_now, cycles_delta;
+        u64 nsec_now, nsec_delta, ns;
+        struct mlx5_eqe *eqe = data;
         int pin = eqe->data.pps.pin;
-        s64 ns;
+        struct mlx5_core_dev *mdev;
+        struct timespec64 ts;
         unsigned long flags;
+
+        mdev = container_of(clock, struct mlx5_core_dev, clock);
 
         switch (clock->ptp_info.pin_config[pin].func) {
         case PTP_PF_EXTTS:
@@ -485,26 +565,30 @@
                 } else {
                         ptp_event.type = PTP_CLOCK_EXTTS;
                 }
+                /* TODO: clock->ptp can be NULL if ptp_clock_register fails */
                 ptp_clock_event(clock->ptp, &ptp_event);
                 break;
         case PTP_PF_PEROUT:
-                mlx5_ptp_gettime(&clock->ptp_info, &ts);
-                cycles_now = mlx5_read_internal_timer(mdev);
+                mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
+                cycles_now = mlx5_read_internal_timer(mdev, NULL);
                 ts.tv_sec += 1;
                 ts.tv_nsec = 0;
                 ns = timespec64_to_ns(&ts);
-                write_lock_irqsave(&clock->lock, flags);
+                write_seqlock_irqsave(&clock->lock, flags);
                 nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
                 nsec_delta = ns - nsec_now;
                 cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
                                          clock->cycles.mult);
                 clock->pps_info.start[pin] = cycles_now + cycles_delta;
+                write_sequnlock_irqrestore(&clock->lock, flags);
                 schedule_work(&clock->pps_info.out_work);
-                write_unlock_irqrestore(&clock->lock, flags);
                 break;
         default:
-                mlx5_core_err(mdev, " Unhandled event\n");
+                mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
+                              clock->ptp_info.pin_config[pin].func);
         }
+
+        return NOTIFY_OK;
 }
 
 void mlx5_init_clock(struct mlx5_core_dev *mdev)
@@ -520,14 +604,13 @@
                 mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
                 return;
         }
-        rwlock_init(&clock->lock);
+        seqlock_init(&clock->lock);
         clock->cycles.read = read_internal_timer;
         clock->cycles.shift = MLX5_CYCLES_SHIFT;
         clock->cycles.mult = clocksource_khz2mult(dev_freq,
                                                   clock->cycles.shift);
         clock->nominal_c_mult = clock->cycles.mult;
         clock->cycles.mask = CLOCKSOURCE_MASK(41);
-        clock->mdev = mdev;
 
         timecounter_init(&clock->tc, &clock->cycles,
                          ktime_to_ns(ktime_get_real()));
@@ -547,23 +630,16 @@
         do_div(ns, NSEC_PER_SEC / HZ);
         clock->overflow_period = ns;
 
-        mdev->clock_info_page = alloc_page(GFP_KERNEL);
-        if (mdev->clock_info_page) {
-                mdev->clock_info = kmap(mdev->clock_info_page);
-                if (!mdev->clock_info) {
-                        __free_page(mdev->clock_info_page);
-                        mlx5_core_warn(mdev, "failed to map clock page\n");
-                } else {
-                        mdev->clock_info->sign = 0;
-                        mdev->clock_info->nsec = clock->tc.nsec;
-                        mdev->clock_info->cycles = clock->tc.cycle_last;
-                        mdev->clock_info->mask = clock->cycles.mask;
-                        mdev->clock_info->mult = clock->nominal_c_mult;
-                        mdev->clock_info->shift = clock->cycles.shift;
-                        mdev->clock_info->frac = clock->tc.frac;
-                        mdev->clock_info->overflow_period =
-                                                clock->overflow_period;
-                }
+        mdev->clock_info =
+                (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
+        if (mdev->clock_info) {
+                mdev->clock_info->nsec = clock->tc.nsec;
+                mdev->clock_info->cycles = clock->tc.cycle_last;
+                mdev->clock_info->mask = clock->cycles.mask;
+                mdev->clock_info->mult = clock->nominal_c_mult;
+                mdev->clock_info->shift = clock->cycles.shift;
+                mdev->clock_info->frac = clock->tc.frac;
+                mdev->clock_info->overflow_period = clock->overflow_period;
         }
 
         INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
@@ -589,6 +665,9 @@
                                PTR_ERR(clock->ptp));
                 clock->ptp = NULL;
         }
+
+        MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
+        mlx5_eq_notifier_register(mdev, &clock->pps_nb);
 }
 
 void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
@@ -598,6 +677,7 @@
         if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
                 return;
 
+        mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
         if (clock->ptp) {
                 ptp_clock_unregister(clock->ptp);
                 clock->ptp = NULL;
@@ -607,8 +687,7 @@
         cancel_delayed_work_sync(&clock->overflow_work);
 
         if (mdev->clock_info) {
-                kunmap(mdev->clock_info_page);
-                __free_page(mdev->clock_info_page);
+                free_page((unsigned long)mdev->clock_info);
                 mdev->clock_info = NULL;
         }
 