/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/clocksource.h>
34 #include <linux/highmem.h>
35 #include <rdma/mlx5-abi.h>
39 MLX5_CYCLES_SHIFT = 23
43 MLX5_PIN_MODE_IN = 0x0,
44 MLX5_PIN_MODE_OUT = 0x1,
48 MLX5_OUT_PATTERN_PULSE = 0x0,
49 MLX5_OUT_PATTERN_PERIODIC = 0x1,
53 MLX5_EVENT_MODE_DISABLE = 0x0,
54 MLX5_EVENT_MODE_REPETETIVE = 0x1,
55 MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2,
59 MLX5_MTPPS_FS_ENABLE = BIT(0x0),
60 MLX5_MTPPS_FS_PATTERN = BIT(0x2),
61 MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
62 MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
63 MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
64 MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
67 static u64 read_internal_timer(const struct cyclecounter *cc)
69 struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
70 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
73 return mlx5_read_internal_timer(mdev) & cc->mask;
76 static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
78 struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
79 struct mlx5_clock *clock = &mdev->clock;
85 sign = smp_load_acquire(&clock_info->sign);
86 smp_store_mb(clock_info->sign,
87 sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
89 clock_info->cycles = clock->tc.cycle_last;
90 clock_info->mult = clock->cycles.mult;
91 clock_info->nsec = clock->tc.nsec;
92 clock_info->frac = clock->tc.frac;
94 smp_store_release(&clock_info->sign,
95 sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
98 static void mlx5_pps_out(struct work_struct *work)
100 struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
102 struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
104 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
106 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
110 for (i = 0; i < clock->ptp_info.n_pins; i++) {
113 write_lock_irqsave(&clock->lock, flags);
114 tstart = clock->pps_info.start[i];
115 clock->pps_info.start[i] = 0;
116 write_unlock_irqrestore(&clock->lock, flags);
120 MLX5_SET(mtpps_reg, in, pin, i);
121 MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
122 MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
123 mlx5_set_mtpps(mdev, in, sizeof(in));
127 static void mlx5_timestamp_overflow(struct work_struct *work)
129 struct delayed_work *dwork = to_delayed_work(work);
130 struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
134 write_lock_irqsave(&clock->lock, flags);
135 timecounter_read(&clock->tc);
136 mlx5_update_clock_info_page(clock->mdev);
137 write_unlock_irqrestore(&clock->lock, flags);
138 schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
141 static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
142 const struct timespec64 *ts)
144 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
146 u64 ns = timespec64_to_ns(ts);
149 write_lock_irqsave(&clock->lock, flags);
150 timecounter_init(&clock->tc, &clock->cycles, ns);
151 mlx5_update_clock_info_page(clock->mdev);
152 write_unlock_irqrestore(&clock->lock, flags);
157 static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
159 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
164 write_lock_irqsave(&clock->lock, flags);
165 ns = timecounter_read(&clock->tc);
166 write_unlock_irqrestore(&clock->lock, flags);
168 *ts = ns_to_timespec64(ns);
173 static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
175 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
179 write_lock_irqsave(&clock->lock, flags);
180 timecounter_adjtime(&clock->tc, delta);
181 mlx5_update_clock_info_page(clock->mdev);
182 write_unlock_irqrestore(&clock->lock, flags);
187 static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
193 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
201 adj = clock->nominal_c_mult;
203 diff = div_u64(adj, 1000000000ULL);
205 write_lock_irqsave(&clock->lock, flags);
206 timecounter_read(&clock->tc);
207 clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
208 clock->nominal_c_mult + diff;
209 mlx5_update_clock_info_page(clock->mdev);
210 write_unlock_irqrestore(&clock->lock, flags);
215 static int mlx5_extts_configure(struct ptp_clock_info *ptp,
216 struct ptp_clock_request *rq,
219 struct mlx5_clock *clock =
220 container_of(ptp, struct mlx5_clock, ptp_info);
221 struct mlx5_core_dev *mdev =
222 container_of(clock, struct mlx5_core_dev, clock);
223 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
224 u32 field_select = 0;
230 if (!MLX5_PPS_CAP(mdev))
233 if (rq->extts.index >= clock->ptp_info.n_pins)
237 pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
240 pin_mode = MLX5_PIN_MODE_IN;
241 pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
242 field_select = MLX5_MTPPS_FS_PIN_MODE |
243 MLX5_MTPPS_FS_PATTERN |
244 MLX5_MTPPS_FS_ENABLE;
246 pin = rq->extts.index;
247 field_select = MLX5_MTPPS_FS_ENABLE;
250 MLX5_SET(mtpps_reg, in, pin, pin);
251 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
252 MLX5_SET(mtpps_reg, in, pattern, pattern);
253 MLX5_SET(mtpps_reg, in, enable, on);
254 MLX5_SET(mtpps_reg, in, field_select, field_select);
256 err = mlx5_set_mtpps(mdev, in, sizeof(in));
260 return mlx5_set_mtppse(mdev, pin, 0,
261 MLX5_EVENT_MODE_REPETETIVE & on);
264 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
265 struct ptp_clock_request *rq,
268 struct mlx5_clock *clock =
269 container_of(ptp, struct mlx5_clock, ptp_info);
270 struct mlx5_core_dev *mdev =
271 container_of(clock, struct mlx5_core_dev, clock);
272 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
273 u64 nsec_now, nsec_delta, time_stamp = 0;
274 u64 cycles_now, cycles_delta;
275 struct timespec64 ts;
277 u32 field_select = 0;
284 if (!MLX5_PPS_CAP(mdev))
287 if (rq->perout.index >= clock->ptp_info.n_pins)
291 pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
296 pin_mode = MLX5_PIN_MODE_OUT;
297 pattern = MLX5_OUT_PATTERN_PERIODIC;
298 ts.tv_sec = rq->perout.period.sec;
299 ts.tv_nsec = rq->perout.period.nsec;
300 ns = timespec64_to_ns(&ts);
302 if ((ns >> 1) != 500000000LL)
305 ts.tv_sec = rq->perout.start.sec;
306 ts.tv_nsec = rq->perout.start.nsec;
307 ns = timespec64_to_ns(&ts);
308 cycles_now = mlx5_read_internal_timer(mdev);
309 write_lock_irqsave(&clock->lock, flags);
310 nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
311 nsec_delta = ns - nsec_now;
312 cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
314 write_unlock_irqrestore(&clock->lock, flags);
315 time_stamp = cycles_now + cycles_delta;
316 field_select = MLX5_MTPPS_FS_PIN_MODE |
317 MLX5_MTPPS_FS_PATTERN |
318 MLX5_MTPPS_FS_ENABLE |
319 MLX5_MTPPS_FS_TIME_STAMP;
321 pin = rq->perout.index;
322 field_select = MLX5_MTPPS_FS_ENABLE;
325 MLX5_SET(mtpps_reg, in, pin, pin);
326 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
327 MLX5_SET(mtpps_reg, in, pattern, pattern);
328 MLX5_SET(mtpps_reg, in, enable, on);
329 MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
330 MLX5_SET(mtpps_reg, in, field_select, field_select);
332 err = mlx5_set_mtpps(mdev, in, sizeof(in));
336 return mlx5_set_mtppse(mdev, pin, 0,
337 MLX5_EVENT_MODE_REPETETIVE & on);
340 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
341 struct ptp_clock_request *rq,
344 struct mlx5_clock *clock =
345 container_of(ptp, struct mlx5_clock, ptp_info);
347 clock->pps_info.enabled = !!on;
351 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
352 struct ptp_clock_request *rq,
356 case PTP_CLK_REQ_EXTTS:
357 return mlx5_extts_configure(ptp, rq, on);
358 case PTP_CLK_REQ_PEROUT:
359 return mlx5_perout_configure(ptp, rq, on);
360 case PTP_CLK_REQ_PPS:
361 return mlx5_pps_configure(ptp, rq, on);
368 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
369 enum ptp_pin_function func, unsigned int chan)
371 return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
374 static const struct ptp_clock_info mlx5_ptp_clock_info = {
375 .owner = THIS_MODULE,
377 .max_adj = 100000000,
383 .adjfreq = mlx5_ptp_adjfreq,
384 .adjtime = mlx5_ptp_adjtime,
385 .gettime64 = mlx5_ptp_gettime,
386 .settime64 = mlx5_ptp_settime,
391 static int mlx5_init_pin_config(struct mlx5_clock *clock)
395 clock->ptp_info.pin_config =
396 kzalloc(sizeof(*clock->ptp_info.pin_config) *
397 clock->ptp_info.n_pins, GFP_KERNEL);
398 if (!clock->ptp_info.pin_config)
400 clock->ptp_info.enable = mlx5_ptp_enable;
401 clock->ptp_info.verify = mlx5_ptp_verify;
402 clock->ptp_info.pps = 1;
404 for (i = 0; i < clock->ptp_info.n_pins; i++) {
405 snprintf(clock->ptp_info.pin_config[i].name,
406 sizeof(clock->ptp_info.pin_config[i].name),
408 clock->ptp_info.pin_config[i].index = i;
409 clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
410 clock->ptp_info.pin_config[i].chan = i;
416 static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
418 struct mlx5_clock *clock = &mdev->clock;
419 u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
421 mlx5_query_mtpps(mdev, out, sizeof(out));
423 clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
424 cap_number_of_pps_pins);
425 clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
426 cap_max_num_of_pps_in_pins);
427 clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
428 cap_max_num_of_pps_out_pins);
430 clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
431 clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
432 clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
433 clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
434 clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
435 clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
436 clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
437 clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
440 void mlx5_pps_event(struct mlx5_core_dev *mdev,
441 struct mlx5_eqe *eqe)
443 struct mlx5_clock *clock = &mdev->clock;
444 struct ptp_clock_event ptp_event;
445 struct timespec64 ts;
446 u64 nsec_now, nsec_delta;
447 u64 cycles_now, cycles_delta;
448 int pin = eqe->data.pps.pin;
452 switch (clock->ptp_info.pin_config[pin].func) {
454 ptp_event.index = pin;
455 ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
456 be64_to_cpu(eqe->data.pps.time_stamp));
457 if (clock->pps_info.enabled) {
458 ptp_event.type = PTP_CLOCK_PPSUSR;
459 ptp_event.pps_times.ts_real =
460 ns_to_timespec64(ptp_event.timestamp);
462 ptp_event.type = PTP_CLOCK_EXTTS;
464 ptp_clock_event(clock->ptp, &ptp_event);
467 mlx5_ptp_gettime(&clock->ptp_info, &ts);
468 cycles_now = mlx5_read_internal_timer(mdev);
471 ns = timespec64_to_ns(&ts);
472 write_lock_irqsave(&clock->lock, flags);
473 nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
474 nsec_delta = ns - nsec_now;
475 cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
477 clock->pps_info.start[pin] = cycles_now + cycles_delta;
478 schedule_work(&clock->pps_info.out_work);
479 write_unlock_irqrestore(&clock->lock, flags);
482 mlx5_core_err(mdev, " Unhandled event\n");
486 void mlx5_init_clock(struct mlx5_core_dev *mdev)
488 struct mlx5_clock *clock = &mdev->clock;
493 dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
495 mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
498 rwlock_init(&clock->lock);
499 clock->cycles.read = read_internal_timer;
500 clock->cycles.shift = MLX5_CYCLES_SHIFT;
501 clock->cycles.mult = clocksource_khz2mult(dev_freq,
502 clock->cycles.shift);
503 clock->nominal_c_mult = clock->cycles.mult;
504 clock->cycles.mask = CLOCKSOURCE_MASK(41);
507 timecounter_init(&clock->tc, &clock->cycles,
508 ktime_to_ns(ktime_get_real()));
510 /* Calculate period in seconds to call the overflow watchdog - to make
511 * sure counter is checked at least once every wrap around.
513 ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
515 do_div(ns, NSEC_PER_SEC / 2 / HZ);
516 clock->overflow_period = ns;
518 mdev->clock_info_page = alloc_page(GFP_KERNEL);
519 if (mdev->clock_info_page) {
520 mdev->clock_info = kmap(mdev->clock_info_page);
521 if (!mdev->clock_info) {
522 __free_page(mdev->clock_info_page);
523 mlx5_core_warn(mdev, "failed to map clock page\n");
525 mdev->clock_info->sign = 0;
526 mdev->clock_info->nsec = clock->tc.nsec;
527 mdev->clock_info->cycles = clock->tc.cycle_last;
528 mdev->clock_info->mask = clock->cycles.mask;
529 mdev->clock_info->mult = clock->nominal_c_mult;
530 mdev->clock_info->shift = clock->cycles.shift;
531 mdev->clock_info->frac = clock->tc.frac;
532 mdev->clock_info->overflow_period =
533 clock->overflow_period;
537 INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
538 INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
539 if (clock->overflow_period)
540 schedule_delayed_work(&clock->overflow_work, 0);
542 mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
544 /* Configure the PHC */
545 clock->ptp_info = mlx5_ptp_clock_info;
547 /* Initialize 1PPS data structures */
548 if (MLX5_PPS_CAP(mdev))
549 mlx5_get_pps_caps(mdev);
550 if (clock->ptp_info.n_pins)
551 mlx5_init_pin_config(clock);
553 clock->ptp = ptp_clock_register(&clock->ptp_info,
555 if (IS_ERR(clock->ptp)) {
556 mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
557 PTR_ERR(clock->ptp));
562 void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
564 struct mlx5_clock *clock = &mdev->clock;
566 if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
570 ptp_clock_unregister(clock->ptp);
574 cancel_work_sync(&clock->pps_info.out_work);
575 cancel_delayed_work_sync(&clock->overflow_work);
577 if (mdev->clock_info) {
578 kunmap(mdev->clock_info_page);
579 __free_page(mdev->clock_info_page);
580 mdev->clock_info = NULL;
583 kfree(clock->ptp_info.pin_config);