/*
 * TI Common Platform Time Sync
 *
 * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/err.h>
#include <linux/if.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include "cpts.h"

#define cpts_read32(c, r)       readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r)   writel_relaxed(v, &c->reg->r)

static int event_expired(struct cpts_event *event)
{
        return time_after(jiffies, event->tmo);
}

static int event_type(struct cpts_event *event)
{
        return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
}

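/* Pop one event from the CPTS hardware FIFO, if one is pending.
 * Fills *high/*low and returns 0 on success, -1 when nothing is pending.
 */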
static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
{
        u32 r = cpts_read32(cpts, intstat_raw);

        if (r & TS_PEND_RAW) {
                *high = cpts_read32(cpts, event_high);
                *low  = cpts_read32(cpts, event_low);
                cpts_write32(cpts, EVENT_POP, event_pop);
                return 0;
        }
        return -1;
}

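/* Return expired events from the active list back to the free pool.
 * Returns 0 if at least one event was reclaimed, -1 otherwise.
 */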
static int cpts_purge_events(struct cpts *cpts)
{
        struct list_head *this, *next;
        struct cpts_event *event;
        int removed = 0;

        list_for_each_safe(this, next, &cpts->events) {
                event = list_entry(this, struct cpts_event, list);
                if (event_expired(event)) {
                        list_del_init(&event->list);
                        list_add(&event->list, &cpts->pool);
                        ++removed;
                }
        }

        if (removed)
                pr_debug("cpts: event pool cleaned up %d\n", removed);
        return removed ? 0 : -1;
}

/*
 * Returns zero if matching event type was found.
 */
static int cpts_fifo_read(struct cpts *cpts, int match)
{
        int i, type = -1;
        u32 hi, lo;
        struct cpts_event *event;

        for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
                if (cpts_fifo_pop(cpts, &hi, &lo))
                        break;

                if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
                        pr_err("cpts: event pool empty\n");
                        return -1;
                }

                event = list_first_entry(&cpts->pool, struct cpts_event, list);
                event->tmo = jiffies + 2;
                event->high = hi;
                event->low = lo;
                type = event_type(event);
                switch (type) {
                case CPTS_EV_PUSH:
                case CPTS_EV_RX:
                case CPTS_EV_TX:
                        list_del_init(&event->list);
                        list_add_tail(&event->list, &cpts->events);
                        break;
                case CPTS_EV_ROLL:
                case CPTS_EV_HALF:
                case CPTS_EV_HW:
                        break;
                default:
                        pr_err("cpts: unknown event type\n");
                        break;
                }
                if (type == match)
                        break;
        }
        return type == match ? 0 : -1;
}

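/* Cyclecounter read callback: request a time stamp push (TS_PUSH),
 * then pull the resulting CPTS_EV_PUSH event off the event list to
 * obtain the current raw counter value.
 */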
static u64 cpts_systim_read(const struct cyclecounter *cc)
{
        u64 val = 0;
        struct cpts_event *event;
        struct list_head *this, *next;
        struct cpts *cpts = container_of(cc, struct cpts, cc);

        cpts_write32(cpts, TS_PUSH, ts_push);
        if (cpts_fifo_read(cpts, CPTS_EV_PUSH))
                pr_err("cpts: unable to obtain a time stamp\n");

        list_for_each_safe(this, next, &cpts->events) {
                event = list_entry(this, struct cpts_event, list);
                if (event_type(event) == CPTS_EV_PUSH) {
                        list_del_init(&event->list);
                        list_add(&event->list, &cpts->pool);
                        val = event->low;
                        break;
                }
        }

        return val;
}

/* PTP clock operations */

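/* Frequency adjustment scales the cyclecounter multiplier:
 *   diff = cc_mult * |ppb| / 1e9,  new mult = cc_mult -/+ diff.
 * For example (illustrative numbers only), with cc_mult = 0x80000000
 * and ppb = 100: diff = 2147483648 * 100 / 1000000000 = 214, so the
 * clock runs ~100 ppb faster with mult = 0x800000D6.
 */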
static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
        u64 adj;
        u32 diff, mult;
        int neg_adj = 0;
        unsigned long flags;
        struct cpts *cpts = container_of(ptp, struct cpts, info);

        if (ppb < 0) {
                neg_adj = 1;
                ppb = -ppb;
        }
        mult = cpts->cc_mult;
        adj = mult;
        adj *= ppb;
        diff = div_u64(adj, 1000000000ULL);

        spin_lock_irqsave(&cpts->lock, flags);

        timecounter_read(&cpts->tc);

        cpts->cc.mult = neg_adj ? mult - diff : mult + diff;

        spin_unlock_irqrestore(&cpts->lock, flags);

        return 0;
}

static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        unsigned long flags;
        struct cpts *cpts = container_of(ptp, struct cpts, info);

        spin_lock_irqsave(&cpts->lock, flags);
        timecounter_adjtime(&cpts->tc, delta);
        spin_unlock_irqrestore(&cpts->lock, flags);

        return 0;
}

static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
        u64 ns;
        unsigned long flags;
        struct cpts *cpts = container_of(ptp, struct cpts, info);

        spin_lock_irqsave(&cpts->lock, flags);
        ns = timecounter_read(&cpts->tc);
        spin_unlock_irqrestore(&cpts->lock, flags);

        *ts = ns_to_timespec64(ns);

        return 0;
}

static int cpts_ptp_settime(struct ptp_clock_info *ptp,
                            const struct timespec64 *ts)
{
        u64 ns;
        unsigned long flags;
        struct cpts *cpts = container_of(ptp, struct cpts, info);

        ns = timespec64_to_ns(ts);

        spin_lock_irqsave(&cpts->lock, flags);
        timecounter_init(&cpts->tc, &cpts->cc, ns);
        spin_unlock_irqrestore(&cpts->lock, flags);

        return 0;
}

static int cpts_ptp_enable(struct ptp_clock_info *ptp,
                           struct ptp_clock_request *rq, int on)
{
        return -EOPNOTSUPP;
}

static struct ptp_clock_info cpts_info = {
        .owner          = THIS_MODULE,
        .name           = "CTPS timer",
        .max_adj        = 1000000,
        .n_ext_ts       = 0,
        .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = cpts_ptp_adjfreq,
        .adjtime        = cpts_ptp_adjtime,
        .gettime64      = cpts_ptp_gettime,
        .settime64      = cpts_ptp_settime,
        .enable         = cpts_ptp_enable,
};

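/* Periodic work: reading the timecounter here keeps the 32-bit cycle
 * counter from wrapping unnoticed between two conversions. The
 * ov_check_period interval is derived in cpts_calc_mult_shift() below.
 */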
static void cpts_overflow_check(struct work_struct *work)
{
        struct timespec64 ts;
        struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);

        cpts_ptp_gettime(&cpts->info, &ts);
        pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
        schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
}

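/* Compare a sent/received PTP frame against a CPTS hardware event:
 * locate the PTP header behind the Ethernet/VLAN/IP/UDP headers and
 * check that message type and sequence ID match the event fields.
 */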
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
                      u16 ts_seqid, u8 ts_msgtype)
{
        u16 *seqid;
        unsigned int offset = 0;
        u8 *msgtype, *data = skb->data;

        if (ptp_class & PTP_CLASS_VLAN)
                offset += VLAN_HLEN;

        switch (ptp_class & PTP_CLASS_PMASK) {
        case PTP_CLASS_IPV4:
                offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
                break;
        case PTP_CLASS_IPV6:
                offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
                break;
        case PTP_CLASS_L2:
                offset += ETH_HLEN;
                break;
        default:
                return 0;
        }

        if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
                return 0;

        if (unlikely(ptp_class & PTP_CLASS_V1))
                msgtype = data + offset + OFF_PTP_CONTROL;
        else
                msgtype = data + offset;

        seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

        return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid));
}

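/* Search the event list for an event of the requested type whose
 * message type and sequence ID match the given skb; expired events are
 * recycled along the way. Returns the converted timestamp in ns, or 0
 * if no match was found.
 */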
static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
{
        u64 ns = 0;
        struct cpts_event *event;
        struct list_head *this, *next;
        unsigned int class = ptp_classify_raw(skb);
        unsigned long flags;
        u16 seqid;
        u8 mtype;

        if (class == PTP_CLASS_NONE)
                return 0;

        spin_lock_irqsave(&cpts->lock, flags);
        cpts_fifo_read(cpts, CPTS_EV_PUSH);
        list_for_each_safe(this, next, &cpts->events) {
                event = list_entry(this, struct cpts_event, list);
                if (event_expired(event)) {
                        list_del_init(&event->list);
                        list_add(&event->list, &cpts->pool);
                        continue;
                }
                mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
                seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
                if (ev_type == event_type(event) &&
                    cpts_match(skb, class, seqid, mtype)) {
                        ns = timecounter_cyc2time(&cpts->tc, event->low);
                        list_del_init(&event->list);
                        list_add(&event->list, &cpts->pool);
                        break;
                }
        }
        spin_unlock_irqrestore(&cpts->lock, flags);

        return ns;
}

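/* Attach a hardware receive timestamp to an skb, if RX timestamping is
 * enabled and a matching CPTS event is found.
 */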
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
        u64 ns;
        struct skb_shared_hwtstamps *ssh;

        if (!cpts->rx_enable)
                return;
        ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
        if (!ns)
                return;
        ssh = skb_hwtstamps(skb);
        memset(ssh, 0, sizeof(*ssh));
        ssh->hwtstamp = ns_to_ktime(ns);
}
EXPORT_SYMBOL_GPL(cpts_rx_timestamp);

void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
        u64 ns;
        struct skb_shared_hwtstamps ssh;

        if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
                return;
        ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
        if (!ns)
                return;
        memset(&ssh, 0, sizeof(ssh));
        ssh.hwtstamp = ns_to_ktime(ns);
        skb_tstamp_tx(skb, &ssh);
}
EXPORT_SYMBOL_GPL(cpts_tx_timestamp);

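/* Bring the CPTS block up: initialize the event lists, enable the
 * hardware and its interrupt, start the timecounter from the current
 * wall-clock time and register the PTP clock device.
 */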
int cpts_register(struct cpts *cpts)
{
        int err, i;

        INIT_LIST_HEAD(&cpts->events);
        INIT_LIST_HEAD(&cpts->pool);
        for (i = 0; i < CPTS_MAX_EVENTS; i++)
                list_add(&cpts->pool_data[i].list, &cpts->pool);

        clk_enable(cpts->refclk);

        cpts_write32(cpts, CPTS_EN, control);
        cpts_write32(cpts, TS_PEND_EN, int_enable);

        timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));

        cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
        if (IS_ERR(cpts->clock)) {
                err = PTR_ERR(cpts->clock);
                cpts->clock = NULL;
                goto err_ptp;
        }
        cpts->phc_index = ptp_clock_index(cpts->clock);

        schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);

        return 0;

err_ptp:
        clk_disable(cpts->refclk);
        return err;
}
EXPORT_SYMBOL_GPL(cpts_register);

void cpts_unregister(struct cpts *cpts)
{
        if (WARN_ON(!cpts->clock))
                return;

        cancel_delayed_work_sync(&cpts->overflow_work);

        ptp_clock_unregister(cpts->clock);
        cpts->clock = NULL;

        cpts_write32(cpts, 0, int_enable);
        cpts_write32(cpts, 0, control);

        clk_disable(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_unregister);

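/* Derive cc.mult/cc.shift from the reference clock rate so that the
 * cyclecounter conversion ns = (cycles * mult) >> shift matches the
 * refclk frequency, and size the overflow-check period to half the
 * time the 32-bit counter can run before a conversion would wrap
 * (capped at 10 s).
 */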
static void cpts_calc_mult_shift(struct cpts *cpts)
{
        u64 frac, maxsec, ns;
        u32 freq;

        freq = clk_get_rate(cpts->refclk);

        /* Calc the maximum number of seconds which we can run before
         * wrapping around.
         */
        maxsec = cpts->cc.mask;
        do_div(maxsec, freq);
        /* limit conversion rate to 10 sec as higher values will produce
         * too small mult factors and so reduce the conversion accuracy
         */
        if (maxsec > 10)
                maxsec = 10;

        /* Calc overflow check period (maxsec / 2) */
        cpts->ov_check_period = (HZ * maxsec) / 2;
        dev_info(cpts->dev, "cpts: overflow check period %lu (jiffies)\n",
                 cpts->ov_check_period);

        if (cpts->cc.mult || cpts->cc.shift)
                return;

        clocks_calc_mult_shift(&cpts->cc.mult, &cpts->cc.shift,
                               freq, NSEC_PER_SEC, maxsec);

        frac = 0;
        ns = cyclecounter_cyc2ns(&cpts->cc, freq, cpts->cc.mask, &frac);

        dev_info(cpts->dev,
                 "CPTS: ref_clk_freq:%u calc_mult:%u calc_shift:%u error:%lld nsec/sec\n",
                 freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}

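/* Optional device-tree overrides: "cpts_clock_mult" and
 * "cpts_clock_shift" must be given as a pair; if both are absent the
 * values are calculated from the refclk rate instead.
 */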
static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
{
        int ret = -EINVAL;
        u32 prop;

        if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
                cpts->cc.mult = prop;

        if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
                cpts->cc.shift = prop;

        if ((cpts->cc.mult && !cpts->cc.shift) ||
            (!cpts->cc.mult && cpts->cc.shift))
                goto of_error;

        return 0;

of_error:
        dev_err(cpts->dev, "CPTS: Missing property in the DT.\n");
        return ret;
}

struct cpts *cpts_create(struct device *dev, void __iomem *regs,
                         struct device_node *node)
{
        struct cpts *cpts;
        int ret;

        cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
        if (!cpts)
                return ERR_PTR(-ENOMEM);

        cpts->dev = dev;
        cpts->reg = (struct cpsw_cpts __iomem *)regs;
        spin_lock_init(&cpts->lock);
        INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);

        ret = cpts_of_parse(cpts, node);
        if (ret)
                return ERR_PTR(ret);

        cpts->refclk = devm_clk_get(dev, "cpts");
        if (IS_ERR(cpts->refclk)) {
                dev_err(dev, "Failed to get cpts refclk\n");
                return ERR_PTR(PTR_ERR(cpts->refclk));
        }

        clk_prepare(cpts->refclk);

        cpts->cc.read = cpts_systim_read;
        cpts->cc.mask = CLOCKSOURCE_MASK(32);
        cpts->info = cpts_info;

        cpts_calc_mult_shift(cpts);
        /* save cc.mult original value as it can be modified
         * by cpts_ptp_adjfreq().
         */
        cpts->cc_mult = cpts->cc.mult;

        return cpts;
}
EXPORT_SYMBOL_GPL(cpts_create);

void cpts_release(struct cpts *cpts)
{
        if (!cpts)
                return;

        if (WARN_ON(!cpts->refclk))
                return;

        clk_unprepare(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_release);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI CPTS driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");