/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/clocksource.h>
#include "en.h"

enum {
        MLX5E_CYCLES_SHIFT = 23
};

enum {
        MLX5E_PIN_MODE_IN = 0x0,
        MLX5E_PIN_MODE_OUT = 0x1,
};

enum {
        MLX5E_OUT_PATTERN_PULSE = 0x0,
        MLX5E_OUT_PATTERN_PERIODIC = 0x1,
};

enum {
        MLX5E_EVENT_MODE_DISABLE = 0x0,
        MLX5E_EVENT_MODE_REPETETIVE = 0x1,
        MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2,
};

enum {
        MLX5E_MTPPS_FS_ENABLE = BIT(0x0),
        MLX5E_MTPPS_FS_PATTERN = BIT(0x2),
        MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3),
        MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4),
        MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
        MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
};

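/* Convert a raw hardware cycle count (e.g. from a CQE) into a wall-clock
 * timestamp for the skb, using the driver's free-running timecounter.
 */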
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
                        struct skb_shared_hwtstamps *hwts)
{
        u64 nsec;

        read_lock(&tstamp->lock);
        nsec = timecounter_cyc2time(&tstamp->clock, timestamp);
        read_unlock(&tstamp->lock);

        hwts->hwtstamp = ns_to_ktime(nsec);
}

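/* cyclecounter read callback: sample the device's internal timer and
 * mask it to the counter width.
 */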
static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
{
        struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp,
                                                   cycles);

        return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
}

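/* Work item that programs any pending 1PPS output start times (queued by
 * mlx5e_pps_event_handler()) into the MTPPS register, one pin at a time.
 */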
static void mlx5e_pps_out(struct work_struct *work)
{
        struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
                                                  out_work);
        struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
                                                   pps_info);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        unsigned long flags;
        int i;

        for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
                u64 tstart;

                write_lock_irqsave(&tstamp->lock, flags);
                tstart = tstamp->pps_info.start[i];
                tstamp->pps_info.start[i] = 0;
                write_unlock_irqrestore(&tstamp->lock, flags);
                if (!tstart)
                        continue;

                MLX5_SET(mtpps_reg, in, pin, i);
                MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
                MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
                mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
        }
}

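/* Periodic work that reads the timecounter so the underlying cycle
 * counter is observed at least once per wrap-around interval.
 */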
static void mlx5e_timestamp_overflow(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
                                                   overflow_work);
        struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
        unsigned long flags;

        write_lock_irqsave(&tstamp->lock, flags);
        timecounter_read(&tstamp->clock);
        write_unlock_irqrestore(&tstamp->lock, flags);
        queue_delayed_work(priv->wq, &tstamp->overflow_work,
                           msecs_to_jiffies(tstamp->overflow_period * 1000));
}

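/* SIOCSHWTSTAMP handler: validate the requested TX/RX timestamping
 * configuration, disable CQE compression while RX timestamping is
 * enabled, and store the resulting configuration.
 */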
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
        struct hwtstamp_config config;
        int err;

        if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
                return -EOPNOTSUPP;

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        /* TX HW timestamp */
        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
        case HWTSTAMP_TX_ON:
                break;
        default:
                return -ERANGE;
        }

        mutex_lock(&priv->state_lock);
        /* RX HW timestamp */
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                /* Reset CQE compression to Admin default */
                mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
        case HWTSTAMP_FILTER_NTP_ALL:
                /* Disable CQE compression */
                netdev_warn(priv->netdev, "Disabling cqe compression\n");
                err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
                if (err) {
                        netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
                        mutex_unlock(&priv->state_lock);
                        return err;
                }
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        default:
                mutex_unlock(&priv->state_lock);
                return -ERANGE;
        }

        memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
        mutex_unlock(&priv->state_lock);

        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(config)) ? -EFAULT : 0;
}

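/* SIOCGHWTSTAMP handler: return the currently stored hwtstamp config. */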
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
{
        struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config;

        if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
                return -EOPNOTSUPP;

        return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
}

static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
                             const struct timespec64 *ts)
{
        struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
                                                   ptp_info);
        u64 ns = timespec64_to_ns(ts);
        unsigned long flags;

        write_lock_irqsave(&tstamp->lock, flags);
        timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
        write_unlock_irqrestore(&tstamp->lock, flags);

        return 0;
}

static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp,
                             struct timespec64 *ts)
{
        struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
                                                   ptp_info);
        u64 ns;
        unsigned long flags;

        write_lock_irqsave(&tstamp->lock, flags);
        ns = timecounter_read(&tstamp->clock);
        write_unlock_irqrestore(&tstamp->lock, flags);

        *ts = ns_to_timespec64(ns);

        return 0;
}

static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
                                                   ptp_info);
        unsigned long flags;

        write_lock_irqsave(&tstamp->lock, flags);
        timecounter_adjtime(&tstamp->clock, delta);
        write_unlock_irqrestore(&tstamp->lock, flags);

        return 0;
}

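/* Adjust the clock frequency by 'delta' parts per billion: scale the
 * nominal cyclecounter multiplier by delta/1e9 and apply the signed
 * correction to cycles.mult.
 */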
static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
        u64 adj;
        u32 diff;
        unsigned long flags;
        int neg_adj = 0;
        struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
                                                   ptp_info);

        if (delta < 0) {
                neg_adj = 1;
                delta = -delta;
        }

        adj = tstamp->nominal_c_mult;
        adj *= delta;
        diff = div_u64(adj, 1000000000ULL);

        write_lock_irqsave(&tstamp->lock, flags);
        timecounter_read(&tstamp->clock);
        tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
                                        tstamp->nominal_c_mult + diff;
        write_unlock_irqrestore(&tstamp->lock, flags);

        return 0;
}

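/* Configure a pin as an external-timestamp (EXTTS) input via the MTPPS
 * register and arm event reporting for it through MTPPSE.
 */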
static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
                                 struct ptp_clock_request *rq,
                                 int on)
{
        struct mlx5e_tstamp *tstamp =
                container_of(ptp, struct mlx5e_tstamp, ptp_info);
        struct mlx5e_priv *priv =
                container_of(tstamp, struct mlx5e_priv, tstamp);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        u32 field_select = 0;
        u8 pin_mode = 0;
        u8 pattern = 0;
        int pin = -1;
        int err = 0;

        if (!MLX5_PPS_CAP(priv->mdev))
                return -EOPNOTSUPP;

        if (rq->extts.index >= tstamp->ptp_info.n_pins)
                return -EINVAL;

        if (on) {
                pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
                if (pin < 0)
                        return -EBUSY;
                pin_mode = MLX5E_PIN_MODE_IN;
                pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
                field_select = MLX5E_MTPPS_FS_PIN_MODE |
                               MLX5E_MTPPS_FS_PATTERN |
                               MLX5E_MTPPS_FS_ENABLE;
        } else {
                pin = rq->extts.index;
                field_select = MLX5E_MTPPS_FS_ENABLE;
        }

        MLX5_SET(mtpps_reg, in, pin, pin);
        MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
        MLX5_SET(mtpps_reg, in, pattern, pattern);
        MLX5_SET(mtpps_reg, in, enable, on);
        MLX5_SET(mtpps_reg, in, field_select, field_select);

        err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
        if (err)
                return err;

        return mlx5_set_mtppse(priv->mdev, pin, 0,
                               MLX5E_EVENT_MODE_REPETETIVE & on);
}

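/* Configure a pin as a periodic output (only a 1 second period is
 * accepted): translate the requested start time into an internal-timer
 * cycle count, program it through the MTPPS register, then enable
 * events via MTPPSE.
 */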
static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
                                  struct ptp_clock_request *rq,
                                  int on)
{
        struct mlx5e_tstamp *tstamp =
                container_of(ptp, struct mlx5e_tstamp, ptp_info);
        struct mlx5e_priv *priv =
                container_of(tstamp, struct mlx5e_priv, tstamp);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        u64 nsec_now, nsec_delta, time_stamp = 0;
        u64 cycles_now, cycles_delta;
        struct timespec64 ts;
        unsigned long flags;
        u32 field_select = 0;
        u8 pin_mode = 0;
        u8 pattern = 0;
        int pin = -1;
        int err = 0;
        s64 ns;

        if (!MLX5_PPS_CAP(priv->mdev))
                return -EOPNOTSUPP;

        if (rq->perout.index >= tstamp->ptp_info.n_pins)
                return -EINVAL;

        if (on) {
                pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT,
                                   rq->perout.index);
                if (pin < 0)
                        return -EBUSY;

                pin_mode = MLX5E_PIN_MODE_OUT;
                pattern = MLX5E_OUT_PATTERN_PERIODIC;
                ts.tv_sec = rq->perout.period.sec;
                ts.tv_nsec = rq->perout.period.nsec;
                ns = timespec64_to_ns(&ts);

                if ((ns >> 1) != 500000000LL)
                        return -EINVAL;

                ts.tv_sec = rq->perout.start.sec;
                ts.tv_nsec = rq->perout.start.nsec;
                ns = timespec64_to_ns(&ts);
                cycles_now = mlx5_read_internal_timer(tstamp->mdev);
                write_lock_irqsave(&tstamp->lock, flags);
                nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
                nsec_delta = ns - nsec_now;
                cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
                                         tstamp->cycles.mult);
                write_unlock_irqrestore(&tstamp->lock, flags);
                time_stamp = cycles_now + cycles_delta;
                field_select = MLX5E_MTPPS_FS_PIN_MODE |
                               MLX5E_MTPPS_FS_PATTERN |
                               MLX5E_MTPPS_FS_ENABLE |
                               MLX5E_MTPPS_FS_TIME_STAMP;
        } else {
                pin = rq->perout.index;
                field_select = MLX5E_MTPPS_FS_ENABLE;
        }

        MLX5_SET(mtpps_reg, in, pin, pin);
        MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
        MLX5_SET(mtpps_reg, in, pattern, pattern);
        MLX5_SET(mtpps_reg, in, enable, on);
        MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
        MLX5_SET(mtpps_reg, in, field_select, field_select);

        err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
        if (err)
                return err;

        return mlx5_set_mtppse(priv->mdev, pin, 0,
                               MLX5E_EVENT_MODE_REPETETIVE & on);
}

static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
                               struct ptp_clock_request *rq,
                               int on)
{
        struct mlx5e_tstamp *tstamp =
                container_of(ptp, struct mlx5e_tstamp, ptp_info);

        tstamp->pps_info.enabled = !!on;
        return 0;
}

static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
                            struct ptp_clock_request *rq,
                            int on)
{
        switch (rq->type) {
        case PTP_CLK_REQ_EXTTS:
                return mlx5e_extts_configure(ptp, rq, on);
        case PTP_CLK_REQ_PEROUT:
                return mlx5e_perout_configure(ptp, rq, on);
        case PTP_CLK_REQ_PPS:
                return mlx5e_pps_configure(ptp, rq, on);
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
                            enum ptp_pin_function func, unsigned int chan)
{
        return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
}

static const struct ptp_clock_info mlx5e_ptp_clock_info = {
        .owner = THIS_MODULE,
        .max_adj = 100000000,
        .n_alarm = 0,
        .n_ext_ts = 0,
        .n_per_out = 0,
        .n_pins = 0,
        .pps = 0,
        .adjfreq = mlx5e_ptp_adjfreq,
        .adjtime = mlx5e_ptp_adjtime,
        .gettime64 = mlx5e_ptp_gettime,
        .settime64 = mlx5e_ptp_settime,
        .enable = NULL,
        .verify = NULL,
};

static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
{
        tstamp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
        tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
}

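/* Allocate and populate the PTP pin table ("mlx5_pps0".."mlx5_ppsN") and
 * hook up the pin enable/verify callbacks once the pin count is known.
 */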
static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
{
        int i;

        tstamp->ptp_info.pin_config =
                kzalloc(sizeof(*tstamp->ptp_info.pin_config) *
                        tstamp->ptp_info.n_pins, GFP_KERNEL);
        if (!tstamp->ptp_info.pin_config)
                return -ENOMEM;
        tstamp->ptp_info.enable = mlx5e_ptp_enable;
        tstamp->ptp_info.verify = mlx5e_ptp_verify;
        tstamp->ptp_info.pps = 1;

        for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
                snprintf(tstamp->ptp_info.pin_config[i].name,
                         sizeof(tstamp->ptp_info.pin_config[i].name),
                         "mlx5_pps%d", i);
                tstamp->ptp_info.pin_config[i].index = i;
                tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE;
                tstamp->ptp_info.pin_config[i].chan = i;
        }

        return 0;
}

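/* Query the MTPPS register for the device's 1PPS capabilities: number of
 * pins, how many can be inputs/outputs, and each pin's supported mode.
 */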
static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
                               struct mlx5e_tstamp *tstamp)
{
        u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

        mlx5_query_mtpps(priv->mdev, out, sizeof(out));

        tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
                                           cap_number_of_pps_pins);
        tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
                                             cap_max_num_of_pps_in_pins);
        tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
                                              cap_max_num_of_pps_out_pins);

        tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
        tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
        tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
        tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
        tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
        tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
        tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
        tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}

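/* Handle a PPS event from the device: for EXTTS pins forward it to the
 * PTP core (as a PPSUSR event when PPS is enabled), for PEROUT pins
 * queue the next output edge at the following second boundary.
 */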
void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
                             struct ptp_clock_event *event)
{
        struct net_device *netdev = priv->netdev;
        struct mlx5e_tstamp *tstamp = &priv->tstamp;
        struct timespec64 ts;
        u64 nsec_now, nsec_delta;
        u64 cycles_now, cycles_delta;
        int pin = event->index;
        s64 ns;
        unsigned long flags;

        switch (tstamp->ptp_info.pin_config[pin].func) {
        case PTP_PF_EXTTS:
                if (tstamp->pps_info.enabled) {
                        event->type = PTP_CLOCK_PPSUSR;
                        event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
                } else {
                        event->type = PTP_CLOCK_EXTTS;
                }
                ptp_clock_event(tstamp->ptp, event);
                break;
        case PTP_PF_PEROUT:
                mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
                cycles_now = mlx5_read_internal_timer(tstamp->mdev);
                ts.tv_sec += 1;
                ts.tv_nsec = 0;
                ns = timespec64_to_ns(&ts);
                write_lock_irqsave(&tstamp->lock, flags);
                nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
                nsec_delta = ns - nsec_now;
                cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
                                         tstamp->cycles.mult);
                tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
                queue_work(priv->wq, &tstamp->pps_info.out_work);
                write_unlock_irqrestore(&tstamp->lock, flags);
                break;
        default:
                netdev_err(netdev, "%s: Unhandled event\n", __func__);
        }
}

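/* Initialize the HW clock: set up the cyclecounter/timecounter from the
 * device frequency, start the overflow worker, and register the PHC
 * (with 1PPS pins when the device supports them).
 */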
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
        struct mlx5e_tstamp *tstamp = &priv->tstamp;
        u64 ns;
        u64 frac = 0;
        u32 dev_freq;

        mlx5e_timestamp_init_config(tstamp);
        dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz);
        if (!dev_freq) {
                mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n");
                return;
        }
        rwlock_init(&tstamp->lock);
        tstamp->cycles.read = mlx5e_read_internal_timer;
        tstamp->cycles.shift = MLX5E_CYCLES_SHIFT;
        tstamp->cycles.mult = clocksource_khz2mult(dev_freq,
                                                   tstamp->cycles.shift);
        tstamp->nominal_c_mult = tstamp->cycles.mult;
        tstamp->cycles.mask = CLOCKSOURCE_MASK(41);
        tstamp->mdev = priv->mdev;

        timecounter_init(&tstamp->clock, &tstamp->cycles,
                         ktime_to_ns(ktime_get_real()));

        /* Calculate period in seconds to call the overflow watchdog - to make
         * sure counter is checked at least once every wrap around.
         */
        ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
                                 frac, &frac);
        do_div(ns, NSEC_PER_SEC / 2 / HZ);
        tstamp->overflow_period = ns;

        INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
        INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
        if (tstamp->overflow_period)
                queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
        else
                mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");

        /* Configure the PHC */
        tstamp->ptp_info = mlx5e_ptp_clock_info;
        snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");

        /* Initialize 1PPS data structures */
        if (MLX5_PPS_CAP(priv->mdev))
                mlx5e_get_pps_caps(priv, tstamp);
        if (tstamp->ptp_info.n_pins)
                mlx5e_init_pin_config(tstamp);

        tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
                                         &priv->mdev->pdev->dev);
        if (IS_ERR(tstamp->ptp)) {
                mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
                               PTR_ERR(tstamp->ptp));
                tstamp->ptp = NULL;
        }
}

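/* Tear down the HW clock: unregister the PHC, flush the PPS and overflow
 * workers, and free the pin table.
 */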
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_tstamp *tstamp = &priv->tstamp;

        if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
                return;

        if (priv->tstamp.ptp) {
                ptp_clock_unregister(priv->tstamp.ptp);
                priv->tstamp.ptp = NULL;
        }

        cancel_work_sync(&tstamp->pps_info.out_work);
        cancel_delayed_work_sync(&tstamp->overflow_work);
        kfree(tstamp->ptp_info.pin_config);
}