/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/clocksource.h>
/* Shift used for the cyclecounter mult/shift pair that converts HW cycles
 * to nanoseconds; see mlx5e_timestamp_init() where mult is derived from it.
 */
enum {
	MLX5E_CYCLES_SHIFT	= 23
};
/* MTPPS pin direction: input (external timestamp) or output (1PPS out). */
enum {
	MLX5E_PIN_MODE_IN		= 0x0,
	MLX5E_PIN_MODE_OUT		= 0x1,
};
/* Output signal pattern for an MTPPS output pin. */
enum {
	MLX5E_OUT_PATTERN_PULSE		= 0x0,
	MLX5E_OUT_PATTERN_PERIODIC	= 0x1,
};
/* Event generation mode programmed via MTPPSE.
 * NOTE: "REPETETIVE" spelling matches the existing identifier used by
 * callers; do not rename.
 */
enum {
	MLX5E_EVENT_MODE_DISABLE	= 0x0,
	MLX5E_EVENT_MODE_REPETETIVE	= 0x1,
	MLX5E_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};
57 MLX5E_MTPPS_FS_ENABLE
= BIT(0x0),
58 MLX5E_MTPPS_FS_PATTERN
= BIT(0x2),
59 MLX5E_MTPPS_FS_PIN_MODE
= BIT(0x3),
60 MLX5E_MTPPS_FS_TIME_STAMP
= BIT(0x4),
61 MLX5E_MTPPS_FS_OUT_PULSE_DURATION
= BIT(0x5),
62 MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ
= BIT(0x7),
65 void mlx5e_fill_hwstamp(struct mlx5e_tstamp
*tstamp
, u64 timestamp
,
66 struct skb_shared_hwtstamps
*hwts
)
70 read_lock(&tstamp
->lock
);
71 nsec
= timecounter_cyc2time(&tstamp
->clock
, timestamp
);
72 read_unlock(&tstamp
->lock
);
74 hwts
->hwtstamp
= ns_to_ktime(nsec
);
77 static u64
mlx5e_read_internal_timer(const struct cyclecounter
*cc
)
79 struct mlx5e_tstamp
*tstamp
= container_of(cc
, struct mlx5e_tstamp
,
82 return mlx5_read_internal_timer(tstamp
->mdev
) & cc
->mask
;
85 static void mlx5e_pps_out(struct work_struct
*work
)
87 struct mlx5e_pps
*pps_info
= container_of(work
, struct mlx5e_pps
,
89 struct mlx5e_tstamp
*tstamp
= container_of(pps_info
, struct mlx5e_tstamp
,
91 u32 in
[MLX5_ST_SZ_DW(mtpps_reg
)] = {0};
95 for (i
= 0; i
< tstamp
->ptp_info
.n_pins
; i
++) {
98 write_lock_irqsave(&tstamp
->lock
, flags
);
99 tstart
= tstamp
->pps_info
.start
[i
];
100 tstamp
->pps_info
.start
[i
] = 0;
101 write_unlock_irqrestore(&tstamp
->lock
, flags
);
105 MLX5_SET(mtpps_reg
, in
, pin
, i
);
106 MLX5_SET64(mtpps_reg
, in
, time_stamp
, tstart
);
107 MLX5_SET(mtpps_reg
, in
, field_select
, MLX5E_MTPPS_FS_TIME_STAMP
);
108 mlx5_set_mtpps(tstamp
->mdev
, in
, sizeof(in
));
112 static void mlx5e_timestamp_overflow(struct work_struct
*work
)
114 struct delayed_work
*dwork
= to_delayed_work(work
);
115 struct mlx5e_tstamp
*tstamp
= container_of(dwork
, struct mlx5e_tstamp
,
117 struct mlx5e_priv
*priv
= container_of(tstamp
, struct mlx5e_priv
, tstamp
);
120 write_lock_irqsave(&tstamp
->lock
, flags
);
121 timecounter_read(&tstamp
->clock
);
122 write_unlock_irqrestore(&tstamp
->lock
, flags
);
123 queue_delayed_work(priv
->wq
, &tstamp
->overflow_work
,
124 msecs_to_jiffies(tstamp
->overflow_period
* 1000));
127 int mlx5e_hwstamp_set(struct mlx5e_priv
*priv
, struct ifreq
*ifr
)
129 struct hwtstamp_config config
;
132 if (!MLX5_CAP_GEN(priv
->mdev
, device_frequency_khz
))
135 if (copy_from_user(&config
, ifr
->ifr_data
, sizeof(config
)))
138 /* TX HW timestamp */
139 switch (config
.tx_type
) {
140 case HWTSTAMP_TX_OFF
:
147 mutex_lock(&priv
->state_lock
);
148 /* RX HW timestamp */
149 switch (config
.rx_filter
) {
150 case HWTSTAMP_FILTER_NONE
:
151 /* Reset CQE compression to Admin default */
152 mlx5e_modify_rx_cqe_compression_locked(priv
, priv
->channels
.params
.rx_cqe_compress_def
);
154 case HWTSTAMP_FILTER_ALL
:
155 case HWTSTAMP_FILTER_SOME
:
156 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
157 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
158 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
159 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
160 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
161 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
162 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
163 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
164 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
165 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
166 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
167 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
168 case HWTSTAMP_FILTER_NTP_ALL
:
169 /* Disable CQE compression */
170 netdev_warn(priv
->netdev
, "Disabling cqe compression");
171 err
= mlx5e_modify_rx_cqe_compression_locked(priv
, false);
173 netdev_err(priv
->netdev
, "Failed disabling cqe compression err=%d\n", err
);
174 mutex_unlock(&priv
->state_lock
);
177 config
.rx_filter
= HWTSTAMP_FILTER_ALL
;
180 mutex_unlock(&priv
->state_lock
);
184 memcpy(&priv
->tstamp
.hwtstamp_config
, &config
, sizeof(config
));
185 mutex_unlock(&priv
->state_lock
);
187 return copy_to_user(ifr
->ifr_data
, &config
,
188 sizeof(config
)) ? -EFAULT
: 0;
191 int mlx5e_hwstamp_get(struct mlx5e_priv
*priv
, struct ifreq
*ifr
)
193 struct hwtstamp_config
*cfg
= &priv
->tstamp
.hwtstamp_config
;
195 if (!MLX5_CAP_GEN(priv
->mdev
, device_frequency_khz
))
198 return copy_to_user(ifr
->ifr_data
, cfg
, sizeof(*cfg
)) ? -EFAULT
: 0;
201 static int mlx5e_ptp_settime(struct ptp_clock_info
*ptp
,
202 const struct timespec64
*ts
)
204 struct mlx5e_tstamp
*tstamp
= container_of(ptp
, struct mlx5e_tstamp
,
206 u64 ns
= timespec64_to_ns(ts
);
209 write_lock_irqsave(&tstamp
->lock
, flags
);
210 timecounter_init(&tstamp
->clock
, &tstamp
->cycles
, ns
);
211 write_unlock_irqrestore(&tstamp
->lock
, flags
);
216 static int mlx5e_ptp_gettime(struct ptp_clock_info
*ptp
,
217 struct timespec64
*ts
)
219 struct mlx5e_tstamp
*tstamp
= container_of(ptp
, struct mlx5e_tstamp
,
224 write_lock_irqsave(&tstamp
->lock
, flags
);
225 ns
= timecounter_read(&tstamp
->clock
);
226 write_unlock_irqrestore(&tstamp
->lock
, flags
);
228 *ts
= ns_to_timespec64(ns
);
233 static int mlx5e_ptp_adjtime(struct ptp_clock_info
*ptp
, s64 delta
)
235 struct mlx5e_tstamp
*tstamp
= container_of(ptp
, struct mlx5e_tstamp
,
239 write_lock_irqsave(&tstamp
->lock
, flags
);
240 timecounter_adjtime(&tstamp
->clock
, delta
);
241 write_unlock_irqrestore(&tstamp
->lock
, flags
);
246 static int mlx5e_ptp_adjfreq(struct ptp_clock_info
*ptp
, s32 delta
)
252 struct mlx5e_tstamp
*tstamp
= container_of(ptp
, struct mlx5e_tstamp
,
260 adj
= tstamp
->nominal_c_mult
;
262 diff
= div_u64(adj
, 1000000000ULL);
264 write_lock_irqsave(&tstamp
->lock
, flags
);
265 timecounter_read(&tstamp
->clock
);
266 tstamp
->cycles
.mult
= neg_adj
? tstamp
->nominal_c_mult
- diff
:
267 tstamp
->nominal_c_mult
+ diff
;
268 write_unlock_irqrestore(&tstamp
->lock
, flags
);
273 static int mlx5e_extts_configure(struct ptp_clock_info
*ptp
,
274 struct ptp_clock_request
*rq
,
277 struct mlx5e_tstamp
*tstamp
=
278 container_of(ptp
, struct mlx5e_tstamp
, ptp_info
);
279 struct mlx5e_priv
*priv
=
280 container_of(tstamp
, struct mlx5e_priv
, tstamp
);
281 u32 in
[MLX5_ST_SZ_DW(mtpps_reg
)] = {0};
282 u32 field_select
= 0;
288 if (!MLX5_PPS_CAP(priv
->mdev
))
291 if (rq
->extts
.index
>= tstamp
->ptp_info
.n_pins
)
295 pin
= ptp_find_pin(tstamp
->ptp
, PTP_PF_EXTTS
, rq
->extts
.index
);
298 pin_mode
= MLX5E_PIN_MODE_IN
;
299 pattern
= !!(rq
->extts
.flags
& PTP_FALLING_EDGE
);
300 field_select
= MLX5E_MTPPS_FS_PIN_MODE
|
301 MLX5E_MTPPS_FS_PATTERN
|
302 MLX5E_MTPPS_FS_ENABLE
;
304 pin
= rq
->extts
.index
;
305 field_select
= MLX5E_MTPPS_FS_ENABLE
;
308 MLX5_SET(mtpps_reg
, in
, pin
, pin
);
309 MLX5_SET(mtpps_reg
, in
, pin_mode
, pin_mode
);
310 MLX5_SET(mtpps_reg
, in
, pattern
, pattern
);
311 MLX5_SET(mtpps_reg
, in
, enable
, on
);
312 MLX5_SET(mtpps_reg
, in
, field_select
, field_select
);
314 err
= mlx5_set_mtpps(priv
->mdev
, in
, sizeof(in
));
318 return mlx5_set_mtppse(priv
->mdev
, pin
, 0,
319 MLX5E_EVENT_MODE_REPETETIVE
& on
);
322 static int mlx5e_perout_configure(struct ptp_clock_info
*ptp
,
323 struct ptp_clock_request
*rq
,
326 struct mlx5e_tstamp
*tstamp
=
327 container_of(ptp
, struct mlx5e_tstamp
, ptp_info
);
328 struct mlx5e_priv
*priv
=
329 container_of(tstamp
, struct mlx5e_priv
, tstamp
);
330 u32 in
[MLX5_ST_SZ_DW(mtpps_reg
)] = {0};
331 u64 nsec_now
, nsec_delta
, time_stamp
= 0;
332 u64 cycles_now
, cycles_delta
;
333 struct timespec64 ts
;
335 u32 field_select
= 0;
342 if (!MLX5_PPS_CAP(priv
->mdev
))
345 if (rq
->perout
.index
>= tstamp
->ptp_info
.n_pins
)
349 pin
= ptp_find_pin(tstamp
->ptp
, PTP_PF_PEROUT
,
354 pin_mode
= MLX5E_PIN_MODE_OUT
;
355 pattern
= MLX5E_OUT_PATTERN_PERIODIC
;
356 ts
.tv_sec
= rq
->perout
.period
.sec
;
357 ts
.tv_nsec
= rq
->perout
.period
.nsec
;
358 ns
= timespec64_to_ns(&ts
);
360 if ((ns
>> 1) != 500000000LL)
363 ts
.tv_sec
= rq
->perout
.start
.sec
;
364 ts
.tv_nsec
= rq
->perout
.start
.nsec
;
365 ns
= timespec64_to_ns(&ts
);
366 cycles_now
= mlx5_read_internal_timer(tstamp
->mdev
);
367 write_lock_irqsave(&tstamp
->lock
, flags
);
368 nsec_now
= timecounter_cyc2time(&tstamp
->clock
, cycles_now
);
369 nsec_delta
= ns
- nsec_now
;
370 cycles_delta
= div64_u64(nsec_delta
<< tstamp
->cycles
.shift
,
371 tstamp
->cycles
.mult
);
372 write_unlock_irqrestore(&tstamp
->lock
, flags
);
373 time_stamp
= cycles_now
+ cycles_delta
;
374 field_select
= MLX5E_MTPPS_FS_PIN_MODE
|
375 MLX5E_MTPPS_FS_PATTERN
|
376 MLX5E_MTPPS_FS_ENABLE
|
377 MLX5E_MTPPS_FS_TIME_STAMP
;
379 pin
= rq
->perout
.index
;
380 field_select
= MLX5E_MTPPS_FS_ENABLE
;
383 MLX5_SET(mtpps_reg
, in
, pin
, pin
);
384 MLX5_SET(mtpps_reg
, in
, pin_mode
, pin_mode
);
385 MLX5_SET(mtpps_reg
, in
, pattern
, pattern
);
386 MLX5_SET(mtpps_reg
, in
, enable
, on
);
387 MLX5_SET64(mtpps_reg
, in
, time_stamp
, time_stamp
);
388 MLX5_SET(mtpps_reg
, in
, field_select
, field_select
);
390 err
= mlx5_set_mtpps(priv
->mdev
, in
, sizeof(in
));
394 return mlx5_set_mtppse(priv
->mdev
, pin
, 0,
395 MLX5E_EVENT_MODE_REPETETIVE
& on
);
398 static int mlx5e_pps_configure(struct ptp_clock_info
*ptp
,
399 struct ptp_clock_request
*rq
,
402 struct mlx5e_tstamp
*tstamp
=
403 container_of(ptp
, struct mlx5e_tstamp
, ptp_info
);
405 tstamp
->pps_info
.enabled
= !!on
;
409 static int mlx5e_ptp_enable(struct ptp_clock_info
*ptp
,
410 struct ptp_clock_request
*rq
,
414 case PTP_CLK_REQ_EXTTS
:
415 return mlx5e_extts_configure(ptp
, rq
, on
);
416 case PTP_CLK_REQ_PEROUT
:
417 return mlx5e_perout_configure(ptp
, rq
, on
);
418 case PTP_CLK_REQ_PPS
:
419 return mlx5e_pps_configure(ptp
, rq
, on
);
426 static int mlx5e_ptp_verify(struct ptp_clock_info
*ptp
, unsigned int pin
,
427 enum ptp_pin_function func
, unsigned int chan
)
429 return (func
== PTP_PF_PHYSYNC
) ? -EOPNOTSUPP
: 0;
432 static const struct ptp_clock_info mlx5e_ptp_clock_info
= {
433 .owner
= THIS_MODULE
,
434 .max_adj
= 100000000,
440 .adjfreq
= mlx5e_ptp_adjfreq
,
441 .adjtime
= mlx5e_ptp_adjtime
,
442 .gettime64
= mlx5e_ptp_gettime
,
443 .settime64
= mlx5e_ptp_settime
,
448 static void mlx5e_timestamp_init_config(struct mlx5e_tstamp
*tstamp
)
450 tstamp
->hwtstamp_config
.tx_type
= HWTSTAMP_TX_OFF
;
451 tstamp
->hwtstamp_config
.rx_filter
= HWTSTAMP_FILTER_NONE
;
454 static int mlx5e_init_pin_config(struct mlx5e_tstamp
*tstamp
)
458 tstamp
->ptp_info
.pin_config
=
459 kzalloc(sizeof(*tstamp
->ptp_info
.pin_config
) *
460 tstamp
->ptp_info
.n_pins
, GFP_KERNEL
);
461 if (!tstamp
->ptp_info
.pin_config
)
463 tstamp
->ptp_info
.enable
= mlx5e_ptp_enable
;
464 tstamp
->ptp_info
.verify
= mlx5e_ptp_verify
;
465 tstamp
->ptp_info
.pps
= 1;
467 for (i
= 0; i
< tstamp
->ptp_info
.n_pins
; i
++) {
468 snprintf(tstamp
->ptp_info
.pin_config
[i
].name
,
469 sizeof(tstamp
->ptp_info
.pin_config
[i
].name
),
471 tstamp
->ptp_info
.pin_config
[i
].index
= i
;
472 tstamp
->ptp_info
.pin_config
[i
].func
= PTP_PF_NONE
;
473 tstamp
->ptp_info
.pin_config
[i
].chan
= i
;
479 static void mlx5e_get_pps_caps(struct mlx5e_priv
*priv
,
480 struct mlx5e_tstamp
*tstamp
)
482 u32 out
[MLX5_ST_SZ_DW(mtpps_reg
)] = {0};
484 mlx5_query_mtpps(priv
->mdev
, out
, sizeof(out
));
486 tstamp
->ptp_info
.n_pins
= MLX5_GET(mtpps_reg
, out
,
487 cap_number_of_pps_pins
);
488 tstamp
->ptp_info
.n_ext_ts
= MLX5_GET(mtpps_reg
, out
,
489 cap_max_num_of_pps_in_pins
);
490 tstamp
->ptp_info
.n_per_out
= MLX5_GET(mtpps_reg
, out
,
491 cap_max_num_of_pps_out_pins
);
493 tstamp
->pps_info
.pin_caps
[0] = MLX5_GET(mtpps_reg
, out
, cap_pin_0_mode
);
494 tstamp
->pps_info
.pin_caps
[1] = MLX5_GET(mtpps_reg
, out
, cap_pin_1_mode
);
495 tstamp
->pps_info
.pin_caps
[2] = MLX5_GET(mtpps_reg
, out
, cap_pin_2_mode
);
496 tstamp
->pps_info
.pin_caps
[3] = MLX5_GET(mtpps_reg
, out
, cap_pin_3_mode
);
497 tstamp
->pps_info
.pin_caps
[4] = MLX5_GET(mtpps_reg
, out
, cap_pin_4_mode
);
498 tstamp
->pps_info
.pin_caps
[5] = MLX5_GET(mtpps_reg
, out
, cap_pin_5_mode
);
499 tstamp
->pps_info
.pin_caps
[6] = MLX5_GET(mtpps_reg
, out
, cap_pin_6_mode
);
500 tstamp
->pps_info
.pin_caps
[7] = MLX5_GET(mtpps_reg
, out
, cap_pin_7_mode
);
503 void mlx5e_pps_event_handler(struct mlx5e_priv
*priv
,
504 struct ptp_clock_event
*event
)
506 struct net_device
*netdev
= priv
->netdev
;
507 struct mlx5e_tstamp
*tstamp
= &priv
->tstamp
;
508 struct timespec64 ts
;
509 u64 nsec_now
, nsec_delta
;
510 u64 cycles_now
, cycles_delta
;
511 int pin
= event
->index
;
515 switch (tstamp
->ptp_info
.pin_config
[pin
].func
) {
517 if (tstamp
->pps_info
.enabled
) {
518 event
->type
= PTP_CLOCK_PPSUSR
;
519 event
->pps_times
.ts_real
= ns_to_timespec64(event
->timestamp
);
521 event
->type
= PTP_CLOCK_EXTTS
;
523 ptp_clock_event(tstamp
->ptp
, event
);
526 mlx5e_ptp_gettime(&tstamp
->ptp_info
, &ts
);
527 cycles_now
= mlx5_read_internal_timer(tstamp
->mdev
);
530 ns
= timespec64_to_ns(&ts
);
531 write_lock_irqsave(&tstamp
->lock
, flags
);
532 nsec_now
= timecounter_cyc2time(&tstamp
->clock
, cycles_now
);
533 nsec_delta
= ns
- nsec_now
;
534 cycles_delta
= div64_u64(nsec_delta
<< tstamp
->cycles
.shift
,
535 tstamp
->cycles
.mult
);
536 tstamp
->pps_info
.start
[pin
] = cycles_now
+ cycles_delta
;
537 queue_work(priv
->wq
, &tstamp
->pps_info
.out_work
);
538 write_unlock_irqrestore(&tstamp
->lock
, flags
);
541 netdev_err(netdev
, "%s: Unhandled event\n", __func__
);
545 void mlx5e_timestamp_init(struct mlx5e_priv
*priv
)
547 struct mlx5e_tstamp
*tstamp
= &priv
->tstamp
;
552 mlx5e_timestamp_init_config(tstamp
);
553 dev_freq
= MLX5_CAP_GEN(priv
->mdev
, device_frequency_khz
);
555 mlx5_core_warn(priv
->mdev
, "invalid device_frequency_khz, aborting HW clock init\n");
558 rwlock_init(&tstamp
->lock
);
559 tstamp
->cycles
.read
= mlx5e_read_internal_timer
;
560 tstamp
->cycles
.shift
= MLX5E_CYCLES_SHIFT
;
561 tstamp
->cycles
.mult
= clocksource_khz2mult(dev_freq
,
562 tstamp
->cycles
.shift
);
563 tstamp
->nominal_c_mult
= tstamp
->cycles
.mult
;
564 tstamp
->cycles
.mask
= CLOCKSOURCE_MASK(41);
565 tstamp
->mdev
= priv
->mdev
;
567 timecounter_init(&tstamp
->clock
, &tstamp
->cycles
,
568 ktime_to_ns(ktime_get_real()));
570 /* Calculate period in seconds to call the overflow watchdog - to make
571 * sure counter is checked at least once every wrap around.
573 ns
= cyclecounter_cyc2ns(&tstamp
->cycles
, tstamp
->cycles
.mask
,
575 do_div(ns
, NSEC_PER_SEC
/ 2 / HZ
);
576 tstamp
->overflow_period
= ns
;
578 INIT_WORK(&tstamp
->pps_info
.out_work
, mlx5e_pps_out
);
579 INIT_DELAYED_WORK(&tstamp
->overflow_work
, mlx5e_timestamp_overflow
);
580 if (tstamp
->overflow_period
)
581 queue_delayed_work(priv
->wq
, &tstamp
->overflow_work
, 0);
583 mlx5_core_warn(priv
->mdev
, "invalid overflow period, overflow_work is not scheduled\n");
585 /* Configure the PHC */
586 tstamp
->ptp_info
= mlx5e_ptp_clock_info
;
587 snprintf(tstamp
->ptp_info
.name
, 16, "mlx5 ptp");
589 /* Initialize 1PPS data structures */
590 if (MLX5_PPS_CAP(priv
->mdev
))
591 mlx5e_get_pps_caps(priv
, tstamp
);
592 if (tstamp
->ptp_info
.n_pins
)
593 mlx5e_init_pin_config(tstamp
);
595 tstamp
->ptp
= ptp_clock_register(&tstamp
->ptp_info
,
596 &priv
->mdev
->pdev
->dev
);
597 if (IS_ERR(tstamp
->ptp
)) {
598 mlx5_core_warn(priv
->mdev
, "ptp_clock_register failed %ld\n",
599 PTR_ERR(tstamp
->ptp
));
604 void mlx5e_timestamp_cleanup(struct mlx5e_priv
*priv
)
606 struct mlx5e_tstamp
*tstamp
= &priv
->tstamp
;
608 if (!MLX5_CAP_GEN(priv
->mdev
, device_frequency_khz
))
611 if (priv
->tstamp
.ptp
) {
612 ptp_clock_unregister(priv
->tstamp
.ptp
);
613 priv
->tstamp
.ptp
= NULL
;
616 cancel_work_sync(&tstamp
->pps_info
.out_work
);
617 cancel_delayed_work_sync(&tstamp
->overflow_work
);
618 kfree(tstamp
->ptp_info
.pin_config
);