/*
 * TI Common Platform Time Sync
 *
 * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
20 #include <linux/err.h>
22 #include <linux/hrtimer.h>
23 #include <linux/module.h>
24 #include <linux/net_tstamp.h>
25 #include <linux/ptp_classify.h>
26 #include <linux/time.h>
27 #include <linux/uaccess.h>
28 #include <linux/workqueue.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
/* MMIO accessors for the CPTS register block (c->reg is the mapped base). */
#define cpts_read32(c, r)	__raw_readl(&c->reg->r)
#define cpts_write32(c, v, r)	__raw_writel(v, &c->reg->r)
39 static int event_expired(struct cpts_event
*event
)
41 return time_after(jiffies
, event
->tmo
);
44 static int event_type(struct cpts_event
*event
)
46 return (event
->high
>> EVENT_TYPE_SHIFT
) & EVENT_TYPE_MASK
;
49 static int cpts_fifo_pop(struct cpts
*cpts
, u32
*high
, u32
*low
)
51 u32 r
= cpts_read32(cpts
, intstat_raw
);
53 if (r
& TS_PEND_RAW
) {
54 *high
= cpts_read32(cpts
, event_high
);
55 *low
= cpts_read32(cpts
, event_low
);
56 cpts_write32(cpts
, EVENT_POP
, event_pop
);
63 * Returns zero if matching event type was found.
65 static int cpts_fifo_read(struct cpts
*cpts
, int match
)
69 struct cpts_event
*event
;
71 for (i
= 0; i
< CPTS_FIFO_DEPTH
; i
++) {
72 if (cpts_fifo_pop(cpts
, &hi
, &lo
))
74 if (list_empty(&cpts
->pool
)) {
75 pr_err("cpts: event pool is empty\n");
78 event
= list_first_entry(&cpts
->pool
, struct cpts_event
, list
);
79 event
->tmo
= jiffies
+ 2;
82 type
= event_type(event
);
87 list_del_init(&event
->list
);
88 list_add_tail(&event
->list
, &cpts
->events
);
95 pr_err("cpts: unknown event type\n");
101 return type
== match
? 0 : -1;
104 static cycle_t
cpts_systim_read(const struct cyclecounter
*cc
)
107 struct cpts_event
*event
;
108 struct list_head
*this, *next
;
109 struct cpts
*cpts
= container_of(cc
, struct cpts
, cc
);
111 cpts_write32(cpts
, TS_PUSH
, ts_push
);
112 if (cpts_fifo_read(cpts
, CPTS_EV_PUSH
))
113 pr_err("cpts: unable to obtain a time stamp\n");
115 list_for_each_safe(this, next
, &cpts
->events
) {
116 event
= list_entry(this, struct cpts_event
, list
);
117 if (event_type(event
) == CPTS_EV_PUSH
) {
118 list_del_init(&event
->list
);
119 list_add(&event
->list
, &cpts
->pool
);
128 /* PTP clock operations */
130 static int cpts_ptp_adjfreq(struct ptp_clock_info
*ptp
, s32 ppb
)
136 struct cpts
*cpts
= container_of(ptp
, struct cpts
, info
);
142 mult
= cpts
->cc_mult
;
145 diff
= div_u64(adj
, 1000000000ULL);
147 spin_lock_irqsave(&cpts
->lock
, flags
);
149 timecounter_read(&cpts
->tc
);
151 cpts
->cc
.mult
= neg_adj
? mult
- diff
: mult
+ diff
;
153 spin_unlock_irqrestore(&cpts
->lock
, flags
);
158 static int cpts_ptp_adjtime(struct ptp_clock_info
*ptp
, s64 delta
)
162 struct cpts
*cpts
= container_of(ptp
, struct cpts
, info
);
164 spin_lock_irqsave(&cpts
->lock
, flags
);
165 now
= timecounter_read(&cpts
->tc
);
167 timecounter_init(&cpts
->tc
, &cpts
->cc
, now
);
168 spin_unlock_irqrestore(&cpts
->lock
, flags
);
173 static int cpts_ptp_gettime(struct ptp_clock_info
*ptp
, struct timespec
*ts
)
178 struct cpts
*cpts
= container_of(ptp
, struct cpts
, info
);
180 spin_lock_irqsave(&cpts
->lock
, flags
);
181 ns
= timecounter_read(&cpts
->tc
);
182 spin_unlock_irqrestore(&cpts
->lock
, flags
);
184 ts
->tv_sec
= div_u64_rem(ns
, 1000000000, &remainder
);
185 ts
->tv_nsec
= remainder
;
190 static int cpts_ptp_settime(struct ptp_clock_info
*ptp
,
191 const struct timespec
*ts
)
195 struct cpts
*cpts
= container_of(ptp
, struct cpts
, info
);
197 ns
= ts
->tv_sec
* 1000000000ULL;
200 spin_lock_irqsave(&cpts
->lock
, flags
);
201 timecounter_init(&cpts
->tc
, &cpts
->cc
, ns
);
202 spin_unlock_irqrestore(&cpts
->lock
, flags
);
207 static int cpts_ptp_enable(struct ptp_clock_info
*ptp
,
208 struct ptp_clock_request
*rq
, int on
)
213 static struct ptp_clock_info cpts_info
= {
214 .owner
= THIS_MODULE
,
215 .name
= "CTPS timer",
220 .adjfreq
= cpts_ptp_adjfreq
,
221 .adjtime
= cpts_ptp_adjtime
,
222 .gettime
= cpts_ptp_gettime
,
223 .settime
= cpts_ptp_settime
,
224 .enable
= cpts_ptp_enable
,
227 static void cpts_overflow_check(struct work_struct
*work
)
230 struct cpts
*cpts
= container_of(work
, struct cpts
, overflow_work
.work
);
232 cpts_write32(cpts
, CPTS_EN
, control
);
233 cpts_write32(cpts
, TS_PEND_EN
, int_enable
);
234 cpts_ptp_gettime(&cpts
->info
, &ts
);
235 pr_debug("cpts overflow check at %ld.%09lu\n", ts
.tv_sec
, ts
.tv_nsec
);
236 schedule_delayed_work(&cpts
->overflow_work
, CPTS_OVERFLOW_PERIOD
);
239 #define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
241 static void cpts_clk_init(struct cpts
*cpts
)
243 cpts
->refclk
= clk_get(NULL
, CPTS_REF_CLOCK_NAME
);
244 if (IS_ERR(cpts
->refclk
)) {
245 pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME
);
249 clk_prepare_enable(cpts
->refclk
);
252 static void cpts_clk_release(struct cpts
*cpts
)
254 clk_disable(cpts
->refclk
);
255 clk_put(cpts
->refclk
);
258 static int cpts_match(struct sk_buff
*skb
, unsigned int ptp_class
,
259 u16 ts_seqid
, u8 ts_msgtype
)
263 u8
*msgtype
, *data
= skb
->data
;
266 case PTP_CLASS_V1_IPV4
:
267 case PTP_CLASS_V2_IPV4
:
268 offset
= ETH_HLEN
+ IPV4_HLEN(data
) + UDP_HLEN
;
270 case PTP_CLASS_V1_IPV6
:
271 case PTP_CLASS_V2_IPV6
:
274 case PTP_CLASS_V2_L2
:
277 case PTP_CLASS_V2_VLAN
:
278 offset
= ETH_HLEN
+ VLAN_HLEN
;
284 if (skb
->len
+ ETH_HLEN
< offset
+ OFF_PTP_SEQUENCE_ID
+ sizeof(*seqid
))
287 if (unlikely(ptp_class
& PTP_CLASS_V1
))
288 msgtype
= data
+ offset
+ OFF_PTP_CONTROL
;
290 msgtype
= data
+ offset
;
292 seqid
= (u16
*)(data
+ offset
+ OFF_PTP_SEQUENCE_ID
);
294 return (ts_msgtype
== (*msgtype
& 0xf) && ts_seqid
== ntohs(*seqid
));
297 static u64
cpts_find_ts(struct cpts
*cpts
, struct sk_buff
*skb
, int ev_type
)
300 struct cpts_event
*event
;
301 struct list_head
*this, *next
;
302 unsigned int class = ptp_classify_raw(skb
);
307 if (class == PTP_CLASS_NONE
)
310 spin_lock_irqsave(&cpts
->lock
, flags
);
311 cpts_fifo_read(cpts
, CPTS_EV_PUSH
);
312 list_for_each_safe(this, next
, &cpts
->events
) {
313 event
= list_entry(this, struct cpts_event
, list
);
314 if (event_expired(event
)) {
315 list_del_init(&event
->list
);
316 list_add(&event
->list
, &cpts
->pool
);
319 mtype
= (event
->high
>> MESSAGE_TYPE_SHIFT
) & MESSAGE_TYPE_MASK
;
320 seqid
= (event
->high
>> SEQUENCE_ID_SHIFT
) & SEQUENCE_ID_MASK
;
321 if (ev_type
== event_type(event
) &&
322 cpts_match(skb
, class, seqid
, mtype
)) {
323 ns
= timecounter_cyc2time(&cpts
->tc
, event
->low
);
324 list_del_init(&event
->list
);
325 list_add(&event
->list
, &cpts
->pool
);
329 spin_unlock_irqrestore(&cpts
->lock
, flags
);
334 void cpts_rx_timestamp(struct cpts
*cpts
, struct sk_buff
*skb
)
337 struct skb_shared_hwtstamps
*ssh
;
339 if (!cpts
->rx_enable
)
341 ns
= cpts_find_ts(cpts
, skb
, CPTS_EV_RX
);
344 ssh
= skb_hwtstamps(skb
);
345 memset(ssh
, 0, sizeof(*ssh
));
346 ssh
->hwtstamp
= ns_to_ktime(ns
);
349 void cpts_tx_timestamp(struct cpts
*cpts
, struct sk_buff
*skb
)
352 struct skb_shared_hwtstamps ssh
;
354 if (!(skb_shinfo(skb
)->tx_flags
& SKBTX_IN_PROGRESS
))
356 ns
= cpts_find_ts(cpts
, skb
, CPTS_EV_TX
);
359 memset(&ssh
, 0, sizeof(ssh
));
360 ssh
.hwtstamp
= ns_to_ktime(ns
);
361 skb_tstamp_tx(skb
, &ssh
);
364 #endif /*CONFIG_TI_CPTS*/
366 int cpts_register(struct device
*dev
, struct cpts
*cpts
,
369 #ifdef CONFIG_TI_CPTS
373 cpts
->info
= cpts_info
;
374 cpts
->clock
= ptp_clock_register(&cpts
->info
, dev
);
375 if (IS_ERR(cpts
->clock
)) {
376 err
= PTR_ERR(cpts
->clock
);
380 spin_lock_init(&cpts
->lock
);
382 cpts
->cc
.read
= cpts_systim_read
;
383 cpts
->cc
.mask
= CLOCKSOURCE_MASK(32);
384 cpts
->cc_mult
= mult
;
385 cpts
->cc
.mult
= mult
;
386 cpts
->cc
.shift
= shift
;
388 INIT_LIST_HEAD(&cpts
->events
);
389 INIT_LIST_HEAD(&cpts
->pool
);
390 for (i
= 0; i
< CPTS_MAX_EVENTS
; i
++)
391 list_add(&cpts
->pool_data
[i
].list
, &cpts
->pool
);
394 cpts_write32(cpts
, CPTS_EN
, control
);
395 cpts_write32(cpts
, TS_PEND_EN
, int_enable
);
397 spin_lock_irqsave(&cpts
->lock
, flags
);
398 timecounter_init(&cpts
->tc
, &cpts
->cc
, ktime_to_ns(ktime_get_real()));
399 spin_unlock_irqrestore(&cpts
->lock
, flags
);
401 INIT_DELAYED_WORK(&cpts
->overflow_work
, cpts_overflow_check
);
402 schedule_delayed_work(&cpts
->overflow_work
, CPTS_OVERFLOW_PERIOD
);
404 cpts
->phc_index
= ptp_clock_index(cpts
->clock
);
409 void cpts_unregister(struct cpts
*cpts
)
411 #ifdef CONFIG_TI_CPTS
413 ptp_clock_unregister(cpts
->clock
);
414 cancel_delayed_work_sync(&cpts
->overflow_work
);
417 cpts_clk_release(cpts
);