.. | .. |
---|
32 | 32 | |
---|
33 | 33 | #include <linux/clocksource.h> |
---|
34 | 34 | #include <linux/highmem.h> |
---|
| 35 | +#include <linux/ptp_clock_kernel.h> |
---|
35 | 36 | #include <rdma/mlx5-abi.h> |
---|
| 37 | +#include "lib/eq.h" |
---|
36 | 38 | #include "en.h" |
---|
37 | 39 | #include "clock.h" |
---|
38 | 40 | |
---|
.. | .. |
---|
65 | 67 | MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7), |
---|
66 | 68 | }; |
---|
67 | 69 | |
---|
| 70 | +static u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, |
---|
| 71 | + struct ptp_system_timestamp *sts) |
---|
| 72 | +{ |
---|
| 73 | + u32 timer_h, timer_h1, timer_l; |
---|
| 74 | + |
---|
| 75 | + timer_h = ioread32be(&dev->iseg->internal_timer_h); |
---|
| 76 | + ptp_read_system_prets(sts); |
---|
| 77 | + timer_l = ioread32be(&dev->iseg->internal_timer_l); |
---|
| 78 | + ptp_read_system_postts(sts); |
---|
| 79 | + timer_h1 = ioread32be(&dev->iseg->internal_timer_h); |
---|
| 80 | + if (timer_h != timer_h1) { |
---|
| 81 | + /* wrap around */ |
---|
| 82 | + ptp_read_system_prets(sts); |
---|
| 83 | + timer_l = ioread32be(&dev->iseg->internal_timer_l); |
---|
| 84 | + ptp_read_system_postts(sts); |
---|
| 85 | + } |
---|
| 86 | + |
---|
| 87 | + return (u64)timer_l | (u64)timer_h1 << 32; |
---|
| 88 | +} |
---|
| 89 | + |
---|
68 | 90 | static u64 read_internal_timer(const struct cyclecounter *cc) |
---|
69 | 91 | { |
---|
70 | | - struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles); |
---|
| 92 | + struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles); |
---|
| 93 | + struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer); |
---|
71 | 94 | struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, |
---|
72 | 95 | clock); |
---|
73 | 96 | |
---|
74 | | - return mlx5_read_internal_timer(mdev) & cc->mask; |
---|
| 97 | + return mlx5_read_internal_timer(mdev, NULL) & cc->mask; |
---|
75 | 98 | } |
---|
76 | 99 | |
---|
77 | 100 | static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev) |
---|
78 | 101 | { |
---|
79 | 102 | struct mlx5_ib_clock_info *clock_info = mdev->clock_info; |
---|
80 | 103 | struct mlx5_clock *clock = &mdev->clock; |
---|
| 104 | + struct mlx5_timer *timer; |
---|
81 | 105 | u32 sign; |
---|
82 | 106 | |
---|
83 | 107 | if (!clock_info) |
---|
.. | .. |
---|
87 | 111 | smp_store_mb(clock_info->sign, |
---|
88 | 112 | sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING); |
---|
89 | 113 | |
---|
90 | | - clock_info->cycles = clock->tc.cycle_last; |
---|
91 | | - clock_info->mult = clock->cycles.mult; |
---|
92 | | - clock_info->nsec = clock->tc.nsec; |
---|
93 | | - clock_info->frac = clock->tc.frac; |
---|
| 114 | + timer = &clock->timer; |
---|
| 115 | + clock_info->cycles = timer->tc.cycle_last; |
---|
| 116 | + clock_info->mult = timer->cycles.mult; |
---|
| 117 | + clock_info->nsec = timer->tc.nsec; |
---|
| 118 | + clock_info->frac = timer->tc.frac; |
---|
94 | 119 | |
---|
95 | 120 | smp_store_release(&clock_info->sign, |
---|
96 | 121 | sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2); |
---|
.. | .. |
---|
111 | 136 | for (i = 0; i < clock->ptp_info.n_pins; i++) { |
---|
112 | 137 | u64 tstart; |
---|
113 | 138 | |
---|
114 | | - write_lock_irqsave(&clock->lock, flags); |
---|
| 139 | + write_seqlock_irqsave(&clock->lock, flags); |
---|
115 | 140 | tstart = clock->pps_info.start[i]; |
---|
116 | 141 | clock->pps_info.start[i] = 0; |
---|
117 | | - write_unlock_irqrestore(&clock->lock, flags); |
---|
| 142 | + write_sequnlock_irqrestore(&clock->lock, flags); |
---|
118 | 143 | if (!tstart) |
---|
119 | 144 | continue; |
---|
120 | 145 | |
---|
.. | .. |
---|
128 | 153 | static void mlx5_timestamp_overflow(struct work_struct *work) |
---|
129 | 154 | { |
---|
130 | 155 | struct delayed_work *dwork = to_delayed_work(work); |
---|
131 | | - struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock, |
---|
132 | | - overflow_work); |
---|
| 156 | + struct mlx5_core_dev *mdev; |
---|
| 157 | + struct mlx5_timer *timer; |
---|
| 158 | + struct mlx5_clock *clock; |
---|
133 | 159 | unsigned long flags; |
---|
134 | 160 | |
---|
135 | | - write_lock_irqsave(&clock->lock, flags); |
---|
136 | | - timecounter_read(&clock->tc); |
---|
137 | | - mlx5_update_clock_info_page(clock->mdev); |
---|
138 | | - write_unlock_irqrestore(&clock->lock, flags); |
---|
139 | | - schedule_delayed_work(&clock->overflow_work, clock->overflow_period); |
---|
| 161 | + timer = container_of(dwork, struct mlx5_timer, overflow_work); |
---|
| 162 | + clock = container_of(timer, struct mlx5_clock, timer); |
---|
| 163 | + mdev = container_of(clock, struct mlx5_core_dev, clock); |
---|
| 164 | + |
---|
| 165 | + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) |
---|
| 166 | + goto out; |
---|
| 167 | + |
---|
| 168 | + write_seqlock_irqsave(&clock->lock, flags); |
---|
| 169 | + timecounter_read(&timer->tc); |
---|
| 170 | + mlx5_update_clock_info_page(mdev); |
---|
| 171 | + write_sequnlock_irqrestore(&clock->lock, flags); |
---|
| 172 | + |
---|
| 173 | +out: |
---|
| 174 | + schedule_delayed_work(&timer->overflow_work, timer->overflow_period); |
---|
140 | 175 | } |
---|
141 | 176 | |
---|
142 | | -static int mlx5_ptp_settime(struct ptp_clock_info *ptp, |
---|
143 | | - const struct timespec64 *ts) |
---|
| 177 | +static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) |
---|
144 | 178 | { |
---|
145 | | - struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, |
---|
146 | | - ptp_info); |
---|
| 179 | + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); |
---|
| 180 | + struct mlx5_timer *timer = &clock->timer; |
---|
147 | 181 | u64 ns = timespec64_to_ns(ts); |
---|
| 182 | + struct mlx5_core_dev *mdev; |
---|
148 | 183 | unsigned long flags; |
---|
149 | 184 | |
---|
150 | | - write_lock_irqsave(&clock->lock, flags); |
---|
151 | | - timecounter_init(&clock->tc, &clock->cycles, ns); |
---|
152 | | - mlx5_update_clock_info_page(clock->mdev); |
---|
153 | | - write_unlock_irqrestore(&clock->lock, flags); |
---|
| 185 | + mdev = container_of(clock, struct mlx5_core_dev, clock); |
---|
| 186 | + write_seqlock_irqsave(&clock->lock, flags); |
---|
| 187 | + timecounter_init(&timer->tc, &timer->cycles, ns); |
---|
| 188 | + mlx5_update_clock_info_page(mdev); |
---|
| 189 | + write_sequnlock_irqrestore(&clock->lock, flags); |
---|
154 | 190 | |
---|
155 | 191 | return 0; |
---|
156 | 192 | } |
---|
157 | 193 | |
---|
158 | | -static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) |
---|
| 194 | +static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, |
---|
| 195 | + struct ptp_system_timestamp *sts) |
---|
159 | 196 | { |
---|
160 | | - struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, |
---|
161 | | - ptp_info); |
---|
162 | | - u64 ns; |
---|
| 197 | + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); |
---|
| 198 | + struct mlx5_timer *timer = &clock->timer; |
---|
| 199 | + struct mlx5_core_dev *mdev; |
---|
163 | 200 | unsigned long flags; |
---|
| 201 | + u64 cycles, ns; |
---|
164 | 202 | |
---|
165 | | - write_lock_irqsave(&clock->lock, flags); |
---|
166 | | - ns = timecounter_read(&clock->tc); |
---|
167 | | - write_unlock_irqrestore(&clock->lock, flags); |
---|
| 203 | + mdev = container_of(clock, struct mlx5_core_dev, clock); |
---|
| 204 | + write_seqlock_irqsave(&clock->lock, flags); |
---|
| 205 | + cycles = mlx5_read_internal_timer(mdev, sts); |
---|
| 206 | + ns = timecounter_cyc2time(&timer->tc, cycles); |
---|
| 207 | + write_sequnlock_irqrestore(&clock->lock, flags); |
---|
168 | 208 | |
---|
169 | 209 | *ts = ns_to_timespec64(ns); |
---|
170 | 210 | |
---|
.. | .. |
---|
173 | 213 | |
---|
174 | 214 | static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) |
---|
175 | 215 | { |
---|
176 | | - struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, |
---|
177 | | - ptp_info); |
---|
| 216 | + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); |
---|
| 217 | + struct mlx5_timer *timer = &clock->timer; |
---|
| 218 | + struct mlx5_core_dev *mdev; |
---|
178 | 219 | unsigned long flags; |
---|
179 | 220 | |
---|
180 | | - write_lock_irqsave(&clock->lock, flags); |
---|
181 | | - timecounter_adjtime(&clock->tc, delta); |
---|
182 | | - mlx5_update_clock_info_page(clock->mdev); |
---|
183 | | - write_unlock_irqrestore(&clock->lock, flags); |
---|
| 221 | + mdev = container_of(clock, struct mlx5_core_dev, clock); |
---|
| 222 | + write_seqlock_irqsave(&clock->lock, flags); |
---|
| 223 | + timecounter_adjtime(&timer->tc, delta); |
---|
| 224 | + mlx5_update_clock_info_page(mdev); |
---|
| 225 | + write_sequnlock_irqrestore(&clock->lock, flags); |
---|
184 | 226 | |
---|
185 | 227 | return 0; |
---|
186 | 228 | } |
---|
187 | 229 | |
---|
188 | 230 | static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) |
---|
189 | 231 | { |
---|
190 | | - u64 adj; |
---|
191 | | - u32 diff; |
---|
| 232 | + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); |
---|
| 233 | + struct mlx5_timer *timer = &clock->timer; |
---|
| 234 | + struct mlx5_core_dev *mdev; |
---|
192 | 235 | unsigned long flags; |
---|
193 | 236 | int neg_adj = 0; |
---|
194 | | - struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, |
---|
195 | | - ptp_info); |
---|
| 237 | + u32 diff; |
---|
| 238 | + u64 adj; |
---|
196 | 239 | |
---|
197 | 240 | if (delta < 0) { |
---|
198 | 241 | neg_adj = 1; |
---|
199 | 242 | delta = -delta; |
---|
200 | 243 | } |
---|
201 | 244 | |
---|
202 | | - adj = clock->nominal_c_mult; |
---|
| 245 | + adj = timer->nominal_c_mult; |
---|
203 | 246 | adj *= delta; |
---|
204 | 247 | diff = div_u64(adj, 1000000000ULL); |
---|
205 | 248 | |
---|
206 | | - write_lock_irqsave(&clock->lock, flags); |
---|
207 | | - timecounter_read(&clock->tc); |
---|
208 | | - clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff : |
---|
209 | | - clock->nominal_c_mult + diff; |
---|
210 | | - mlx5_update_clock_info_page(clock->mdev); |
---|
211 | | - write_unlock_irqrestore(&clock->lock, flags); |
---|
| 249 | + mdev = container_of(clock, struct mlx5_core_dev, clock); |
---|
| 250 | + write_seqlock_irqsave(&clock->lock, flags); |
---|
| 251 | + timecounter_read(&timer->tc); |
---|
| 252 | + timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff : |
---|
| 253 | + timer->nominal_c_mult + diff; |
---|
| 254 | + mlx5_update_clock_info_page(mdev); |
---|
| 255 | + write_sequnlock_irqrestore(&clock->lock, flags); |
---|
212 | 256 | |
---|
213 | 257 | return 0; |
---|
214 | 258 | } |
---|
.. | .. |
---|
231 | 275 | if (!MLX5_PPS_CAP(mdev)) |
---|
232 | 276 | return -EOPNOTSUPP; |
---|
233 | 277 | |
---|
| 278 | + /* Reject requests with unsupported flags */ |
---|
| 279 | + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | |
---|
| 280 | + PTP_RISING_EDGE | |
---|
| 281 | + PTP_FALLING_EDGE | |
---|
| 282 | + PTP_STRICT_FLAGS)) |
---|
| 283 | + return -EOPNOTSUPP; |
---|
| 284 | + |
---|
| 285 | + /* Reject requests to enable time stamping on both edges. */ |
---|
| 286 | + if ((rq->extts.flags & PTP_STRICT_FLAGS) && |
---|
| 287 | + (rq->extts.flags & PTP_ENABLE_FEATURE) && |
---|
| 288 | + (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES) |
---|
| 289 | + return -EOPNOTSUPP; |
---|
| 290 | + |
---|
234 | 291 | if (rq->extts.index >= clock->ptp_info.n_pins) |
---|
235 | 292 | return -EINVAL; |
---|
236 | 293 | |
---|
| 294 | + pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index); |
---|
| 295 | + if (pin < 0) |
---|
| 296 | + return -EBUSY; |
---|
| 297 | + |
---|
237 | 298 | if (on) { |
---|
238 | | - pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index); |
---|
239 | | - if (pin < 0) |
---|
240 | | - return -EBUSY; |
---|
241 | 299 | pin_mode = MLX5_PIN_MODE_IN; |
---|
242 | 300 | pattern = !!(rq->extts.flags & PTP_FALLING_EDGE); |
---|
243 | 301 | field_select = MLX5_MTPPS_FS_PIN_MODE | |
---|
244 | 302 | MLX5_MTPPS_FS_PATTERN | |
---|
245 | 303 | MLX5_MTPPS_FS_ENABLE; |
---|
246 | 304 | } else { |
---|
247 | | - pin = rq->extts.index; |
---|
248 | 305 | field_select = MLX5_MTPPS_FS_ENABLE; |
---|
249 | 306 | } |
---|
250 | 307 | |
---|
.. | .. |
---|
270 | 327 | container_of(ptp, struct mlx5_clock, ptp_info); |
---|
271 | 328 | struct mlx5_core_dev *mdev = |
---|
272 | 329 | container_of(clock, struct mlx5_core_dev, clock); |
---|
| 330 | + struct mlx5_timer *timer = &clock->timer; |
---|
273 | 331 | u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; |
---|
274 | 332 | u64 nsec_now, nsec_delta, time_stamp = 0; |
---|
275 | 333 | u64 cycles_now, cycles_delta; |
---|
.. | .. |
---|
285 | 343 | if (!MLX5_PPS_CAP(mdev)) |
---|
286 | 344 | return -EOPNOTSUPP; |
---|
287 | 345 | |
---|
| 346 | + /* Reject requests with unsupported flags */ |
---|
| 347 | + if (rq->perout.flags) |
---|
| 348 | + return -EOPNOTSUPP; |
---|
| 349 | + |
---|
288 | 350 | if (rq->perout.index >= clock->ptp_info.n_pins) |
---|
289 | 351 | return -EINVAL; |
---|
290 | 352 | |
---|
291 | | - if (on) { |
---|
292 | | - pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, |
---|
293 | | - rq->perout.index); |
---|
294 | | - if (pin < 0) |
---|
295 | | - return -EBUSY; |
---|
| 353 | + pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, |
---|
| 354 | + rq->perout.index); |
---|
| 355 | + if (pin < 0) |
---|
| 356 | + return -EBUSY; |
---|
296 | 357 | |
---|
| 358 | + if (on) { |
---|
297 | 359 | pin_mode = MLX5_PIN_MODE_OUT; |
---|
298 | 360 | pattern = MLX5_OUT_PATTERN_PERIODIC; |
---|
299 | 361 | ts.tv_sec = rq->perout.period.sec; |
---|
.. | .. |
---|
306 | 368 | ts.tv_sec = rq->perout.start.sec; |
---|
307 | 369 | ts.tv_nsec = rq->perout.start.nsec; |
---|
308 | 370 | ns = timespec64_to_ns(&ts); |
---|
309 | | - cycles_now = mlx5_read_internal_timer(mdev); |
---|
310 | | - write_lock_irqsave(&clock->lock, flags); |
---|
311 | | - nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); |
---|
| 371 | + cycles_now = mlx5_read_internal_timer(mdev, NULL); |
---|
| 372 | + write_seqlock_irqsave(&clock->lock, flags); |
---|
| 373 | + nsec_now = timecounter_cyc2time(&timer->tc, cycles_now); |
---|
312 | 374 | nsec_delta = ns - nsec_now; |
---|
313 | | - cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, |
---|
314 | | - clock->cycles.mult); |
---|
315 | | - write_unlock_irqrestore(&clock->lock, flags); |
---|
| 375 | + cycles_delta = div64_u64(nsec_delta << timer->cycles.shift, |
---|
| 376 | + timer->cycles.mult); |
---|
| 377 | + write_sequnlock_irqrestore(&clock->lock, flags); |
---|
316 | 378 | time_stamp = cycles_now + cycles_delta; |
---|
317 | 379 | field_select = MLX5_MTPPS_FS_PIN_MODE | |
---|
318 | 380 | MLX5_MTPPS_FS_PATTERN | |
---|
319 | 381 | MLX5_MTPPS_FS_ENABLE | |
---|
320 | 382 | MLX5_MTPPS_FS_TIME_STAMP; |
---|
321 | 383 | } else { |
---|
322 | | - pin = rq->perout.index; |
---|
323 | 384 | field_select = MLX5_MTPPS_FS_ENABLE; |
---|
324 | 385 | } |
---|
325 | 386 | |
---|
.. | .. |
---|
389 | 450 | default: |
---|
390 | 451 | return -EOPNOTSUPP; |
---|
391 | 452 | } |
---|
392 | | - |
---|
393 | | - return -EOPNOTSUPP; |
---|
394 | 453 | } |
---|
395 | 454 | |
---|
396 | 455 | static const struct ptp_clock_info mlx5_ptp_clock_info = { |
---|
397 | 456 | .owner = THIS_MODULE, |
---|
398 | | - .name = "mlx5_p2p", |
---|
399 | | - .max_adj = 100000000, |
---|
| 457 | + .name = "mlx5_ptp", |
---|
| 458 | + .max_adj = 50000000, |
---|
400 | 459 | .n_alarm = 0, |
---|
401 | 460 | .n_ext_ts = 0, |
---|
402 | 461 | .n_per_out = 0, |
---|
.. | .. |
---|
404 | 463 | .pps = 0, |
---|
405 | 464 | .adjfreq = mlx5_ptp_adjfreq, |
---|
406 | 465 | .adjtime = mlx5_ptp_adjtime, |
---|
407 | | - .gettime64 = mlx5_ptp_gettime, |
---|
| 466 | + .gettimex64 = mlx5_ptp_gettimex, |
---|
408 | 467 | .settime64 = mlx5_ptp_settime, |
---|
409 | 468 | .enable = NULL, |
---|
410 | 469 | .verify = NULL, |
---|
411 | 470 | }; |
---|
| 471 | + |
---|
| 472 | +static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin, |
---|
| 473 | + u32 *mtpps, u32 mtpps_size) |
---|
| 474 | +{ |
---|
| 475 | + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {}; |
---|
| 476 | + |
---|
| 477 | + MLX5_SET(mtpps_reg, in, pin, pin); |
---|
| 478 | + |
---|
| 479 | + return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps, |
---|
| 480 | + mtpps_size, MLX5_REG_MTPPS, 0, 0); |
---|
| 481 | +} |
---|
| 482 | + |
---|
| 483 | +static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin) |
---|
| 484 | +{ |
---|
| 485 | + struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock); |
---|
| 486 | + |
---|
| 487 | + u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {}; |
---|
| 488 | + u8 mode; |
---|
| 489 | + int err; |
---|
| 490 | + |
---|
| 491 | + err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out)); |
---|
| 492 | + if (err || !MLX5_GET(mtpps_reg, out, enable)) |
---|
| 493 | + return PTP_PF_NONE; |
---|
| 494 | + |
---|
| 495 | + mode = MLX5_GET(mtpps_reg, out, pin_mode); |
---|
| 496 | + |
---|
| 497 | + if (mode == MLX5_PIN_MODE_IN) |
---|
| 498 | + return PTP_PF_EXTTS; |
---|
| 499 | + else if (mode == MLX5_PIN_MODE_OUT) |
---|
| 500 | + return PTP_PF_PEROUT; |
---|
| 501 | + |
---|
| 502 | + return PTP_PF_NONE; |
---|
| 503 | +} |
---|
412 | 504 | |
---|
413 | 505 | static int mlx5_init_pin_config(struct mlx5_clock *clock) |
---|
414 | 506 | { |
---|
.. | .. |
---|
429 | 521 | sizeof(clock->ptp_info.pin_config[i].name), |
---|
430 | 522 | "mlx5_pps%d", i); |
---|
431 | 523 | clock->ptp_info.pin_config[i].index = i; |
---|
432 | | - clock->ptp_info.pin_config[i].func = PTP_PF_NONE; |
---|
433 | | - clock->ptp_info.pin_config[i].chan = i; |
---|
| 524 | + clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i); |
---|
| 525 | + clock->ptp_info.pin_config[i].chan = 0; |
---|
434 | 526 | } |
---|
435 | 527 | |
---|
436 | 528 | return 0; |
---|
.. | .. |
---|
460 | 552 | clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); |
---|
461 | 553 | } |
---|
462 | 554 | |
---|
463 | | -void mlx5_pps_event(struct mlx5_core_dev *mdev, |
---|
464 | | - struct mlx5_eqe *eqe) |
---|
| 555 | +static int mlx5_pps_event(struct notifier_block *nb, |
---|
| 556 | + unsigned long type, void *data) |
---|
465 | 557 | { |
---|
466 | | - struct mlx5_clock *clock = &mdev->clock; |
---|
| 558 | + struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb); |
---|
| 559 | + struct mlx5_timer *timer = &clock->timer; |
---|
467 | 560 | struct ptp_clock_event ptp_event; |
---|
468 | | - struct timespec64 ts; |
---|
469 | | - u64 nsec_now, nsec_delta; |
---|
470 | 561 | u64 cycles_now, cycles_delta; |
---|
| 562 | + u64 nsec_now, nsec_delta, ns; |
---|
| 563 | + struct mlx5_eqe *eqe = data; |
---|
471 | 564 | int pin = eqe->data.pps.pin; |
---|
472 | | - s64 ns; |
---|
| 565 | + struct mlx5_core_dev *mdev; |
---|
| 566 | + struct timespec64 ts; |
---|
473 | 567 | unsigned long flags; |
---|
| 568 | + |
---|
| 569 | + mdev = container_of(clock, struct mlx5_core_dev, clock); |
---|
474 | 570 | |
---|
475 | 571 | switch (clock->ptp_info.pin_config[pin].func) { |
---|
476 | 572 | case PTP_PF_EXTTS: |
---|
.. | .. |
---|
485 | 581 | } else { |
---|
486 | 582 | ptp_event.type = PTP_CLOCK_EXTTS; |
---|
487 | 583 | } |
---|
| 584 | +	/* TODO: clock->ptp can be NULL if ptp_clock_register fails */ |
---|
488 | 585 | ptp_clock_event(clock->ptp, &ptp_event); |
---|
489 | 586 | break; |
---|
490 | 587 | case PTP_PF_PEROUT: |
---|
491 | | - mlx5_ptp_gettime(&clock->ptp_info, &ts); |
---|
492 | | - cycles_now = mlx5_read_internal_timer(mdev); |
---|
| 588 | + mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL); |
---|
| 589 | + cycles_now = mlx5_read_internal_timer(mdev, NULL); |
---|
493 | 590 | ts.tv_sec += 1; |
---|
494 | 591 | ts.tv_nsec = 0; |
---|
495 | 592 | ns = timespec64_to_ns(&ts); |
---|
496 | | - write_lock_irqsave(&clock->lock, flags); |
---|
497 | | - nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); |
---|
| 593 | + write_seqlock_irqsave(&clock->lock, flags); |
---|
| 594 | + nsec_now = timecounter_cyc2time(&timer->tc, cycles_now); |
---|
498 | 595 | nsec_delta = ns - nsec_now; |
---|
499 | | - cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, |
---|
500 | | - clock->cycles.mult); |
---|
| 596 | + cycles_delta = div64_u64(nsec_delta << timer->cycles.shift, |
---|
| 597 | + timer->cycles.mult); |
---|
501 | 598 | clock->pps_info.start[pin] = cycles_now + cycles_delta; |
---|
| 599 | + write_sequnlock_irqrestore(&clock->lock, flags); |
---|
502 | 600 | schedule_work(&clock->pps_info.out_work); |
---|
503 | | - write_unlock_irqrestore(&clock->lock, flags); |
---|
504 | 601 | break; |
---|
505 | 602 | default: |
---|
506 | | - mlx5_core_err(mdev, " Unhandled event\n"); |
---|
| 603 | + mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n", |
---|
| 604 | + clock->ptp_info.pin_config[pin].func); |
---|
507 | 605 | } |
---|
| 606 | + |
---|
| 607 | + return NOTIFY_OK; |
---|
508 | 608 | } |
---|
509 | 609 | |
---|
510 | | -void mlx5_init_clock(struct mlx5_core_dev *mdev) |
---|
| 610 | +static void mlx5_timecounter_init(struct mlx5_core_dev *mdev) |
---|
511 | 611 | { |
---|
512 | 612 | struct mlx5_clock *clock = &mdev->clock; |
---|
513 | | - u64 overflow_cycles; |
---|
514 | | - u64 ns; |
---|
515 | | - u64 frac = 0; |
---|
| 613 | + struct mlx5_timer *timer = &clock->timer; |
---|
516 | 614 | u32 dev_freq; |
---|
517 | 615 | |
---|
518 | 616 | dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz); |
---|
519 | | - if (!dev_freq) { |
---|
520 | | - mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); |
---|
521 | | - return; |
---|
522 | | - } |
---|
523 | | - rwlock_init(&clock->lock); |
---|
524 | | - clock->cycles.read = read_internal_timer; |
---|
525 | | - clock->cycles.shift = MLX5_CYCLES_SHIFT; |
---|
526 | | - clock->cycles.mult = clocksource_khz2mult(dev_freq, |
---|
527 | | - clock->cycles.shift); |
---|
528 | | - clock->nominal_c_mult = clock->cycles.mult; |
---|
529 | | - clock->cycles.mask = CLOCKSOURCE_MASK(41); |
---|
530 | | - clock->mdev = mdev; |
---|
| 617 | + timer->cycles.read = read_internal_timer; |
---|
| 618 | + timer->cycles.shift = MLX5_CYCLES_SHIFT; |
---|
| 619 | + timer->cycles.mult = clocksource_khz2mult(dev_freq, |
---|
| 620 | + timer->cycles.shift); |
---|
| 621 | + timer->nominal_c_mult = timer->cycles.mult; |
---|
| 622 | + timer->cycles.mask = CLOCKSOURCE_MASK(41); |
---|
531 | 623 | |
---|
532 | | - timecounter_init(&clock->tc, &clock->cycles, |
---|
| 624 | + timecounter_init(&timer->tc, &timer->cycles, |
---|
533 | 625 | ktime_to_ns(ktime_get_real())); |
---|
| 626 | +} |
---|
| 627 | + |
---|
| 628 | +static void mlx5_init_overflow_period(struct mlx5_clock *clock) |
---|
| 629 | +{ |
---|
| 630 | + struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock); |
---|
| 631 | + struct mlx5_ib_clock_info *clock_info = mdev->clock_info; |
---|
| 632 | + struct mlx5_timer *timer = &clock->timer; |
---|
| 633 | + u64 overflow_cycles; |
---|
| 634 | + u64 frac = 0; |
---|
| 635 | + u64 ns; |
---|
534 | 636 | |
---|
535 | 637 | /* Calculate period in seconds to call the overflow watchdog - to make |
---|
536 | 638 | * sure counter is checked at least twice every wrap around. |
---|
.. | .. |
---|
539 | 641 | * multiplied by clock multiplier where the result doesn't exceed |
---|
540 | 642 | * 64bits. |
---|
541 | 643 | */ |
---|
542 | | - overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult); |
---|
543 | | - overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3)); |
---|
| 644 | + overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult); |
---|
| 645 | + overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3)); |
---|
544 | 646 | |
---|
545 | | - ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, |
---|
| 647 | + ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles, |
---|
546 | 648 | frac, &frac); |
---|
547 | 649 | do_div(ns, NSEC_PER_SEC / HZ); |
---|
548 | | - clock->overflow_period = ns; |
---|
| 650 | + timer->overflow_period = ns; |
---|
549 | 651 | |
---|
550 | | - mdev->clock_info_page = alloc_page(GFP_KERNEL); |
---|
551 | | - if (mdev->clock_info_page) { |
---|
552 | | - mdev->clock_info = kmap(mdev->clock_info_page); |
---|
553 | | - if (!mdev->clock_info) { |
---|
554 | | - __free_page(mdev->clock_info_page); |
---|
555 | | - mlx5_core_warn(mdev, "failed to map clock page\n"); |
---|
556 | | - } else { |
---|
557 | | - mdev->clock_info->sign = 0; |
---|
558 | | - mdev->clock_info->nsec = clock->tc.nsec; |
---|
559 | | - mdev->clock_info->cycles = clock->tc.cycle_last; |
---|
560 | | - mdev->clock_info->mask = clock->cycles.mask; |
---|
561 | | - mdev->clock_info->mult = clock->nominal_c_mult; |
---|
562 | | - mdev->clock_info->shift = clock->cycles.shift; |
---|
563 | | - mdev->clock_info->frac = clock->tc.frac; |
---|
564 | | - mdev->clock_info->overflow_period = |
---|
565 | | - clock->overflow_period; |
---|
566 | | - } |
---|
| 652 | + INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow); |
---|
| 653 | + if (timer->overflow_period) |
---|
| 654 | + schedule_delayed_work(&timer->overflow_work, 0); |
---|
| 655 | + else |
---|
| 656 | + mlx5_core_warn(mdev, |
---|
| 657 | + "invalid overflow period, overflow_work is not scheduled\n"); |
---|
| 658 | + |
---|
| 659 | + if (clock_info) |
---|
| 660 | + clock_info->overflow_period = timer->overflow_period; |
---|
| 661 | +} |
---|
| 662 | + |
---|
| 663 | +static void mlx5_init_clock_info(struct mlx5_core_dev *mdev) |
---|
| 664 | +{ |
---|
| 665 | + struct mlx5_clock *clock = &mdev->clock; |
---|
| 666 | + struct mlx5_ib_clock_info *info; |
---|
| 667 | + struct mlx5_timer *timer; |
---|
| 668 | + |
---|
| 669 | + mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL); |
---|
| 670 | + if (!mdev->clock_info) { |
---|
| 671 | + mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n"); |
---|
| 672 | + return; |
---|
567 | 673 | } |
---|
568 | 674 | |
---|
| 675 | + info = mdev->clock_info; |
---|
| 676 | + timer = &clock->timer; |
---|
| 677 | + |
---|
| 678 | + info->nsec = timer->tc.nsec; |
---|
| 679 | + info->cycles = timer->tc.cycle_last; |
---|
| 680 | + info->mask = timer->cycles.mask; |
---|
| 681 | + info->mult = timer->nominal_c_mult; |
---|
| 682 | + info->shift = timer->cycles.shift; |
---|
| 683 | + info->frac = timer->tc.frac; |
---|
| 684 | +} |
---|
| 685 | + |
---|
| 686 | +void mlx5_init_clock(struct mlx5_core_dev *mdev) |
---|
| 687 | +{ |
---|
| 688 | + struct mlx5_clock *clock = &mdev->clock; |
---|
| 689 | + |
---|
| 690 | + if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) { |
---|
| 691 | + mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); |
---|
| 692 | + return; |
---|
| 693 | + } |
---|
| 694 | + |
---|
| 695 | + seqlock_init(&clock->lock); |
---|
| 696 | + |
---|
| 697 | + mlx5_timecounter_init(mdev); |
---|
| 698 | + mlx5_init_clock_info(mdev); |
---|
| 699 | + mlx5_init_overflow_period(clock); |
---|
569 | 700 | INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out); |
---|
570 | | - INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow); |
---|
571 | | - if (clock->overflow_period) |
---|
572 | | - schedule_delayed_work(&clock->overflow_work, 0); |
---|
573 | | - else |
---|
574 | | - mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n"); |
---|
575 | 701 | |
---|
576 | 702 | /* Configure the PHC */ |
---|
577 | 703 | clock->ptp_info = mlx5_ptp_clock_info; |
---|
.. | .. |
---|
589 | 715 | PTR_ERR(clock->ptp)); |
---|
590 | 716 | clock->ptp = NULL; |
---|
591 | 717 | } |
---|
| 718 | + |
---|
| 719 | + MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT); |
---|
| 720 | + mlx5_eq_notifier_register(mdev, &clock->pps_nb); |
---|
592 | 721 | } |
---|
593 | 722 | |
---|
594 | 723 | void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) |
---|
.. | .. |
---|
598 | 727 | if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) |
---|
599 | 728 | return; |
---|
600 | 729 | |
---|
| 730 | + mlx5_eq_notifier_unregister(mdev, &clock->pps_nb); |
---|
601 | 731 | if (clock->ptp) { |
---|
602 | 732 | ptp_clock_unregister(clock->ptp); |
---|
603 | 733 | clock->ptp = NULL; |
---|
604 | 734 | } |
---|
605 | 735 | |
---|
606 | 736 | cancel_work_sync(&clock->pps_info.out_work); |
---|
607 | | - cancel_delayed_work_sync(&clock->overflow_work); |
---|
| 737 | + cancel_delayed_work_sync(&clock->timer.overflow_work); |
---|
608 | 738 | |
---|
609 | 739 | if (mdev->clock_info) { |
---|
610 | | - kunmap(mdev->clock_info_page); |
---|
611 | | - __free_page(mdev->clock_info_page); |
---|
| 740 | + free_page((unsigned long)mdev->clock_info); |
---|
612 | 741 | mdev->clock_info = NULL; |
---|
613 | 742 | } |
---|
614 | 743 | |
---|