]>
Commit | Line | Data |
---|---|---|
b711f687 JN |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Intel Quadrature Encoder Peripheral driver | |
4 | * | |
5 | * Copyright (C) 2019-2021 Intel Corporation | |
6 | * | |
7 | * Author: Felipe Balbi (Intel) | |
8 | * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com> | |
9 | * Author: Raymond Tan <raymond.tan@intel.com> | |
10 | */ | |
b711f687 JN |
11 | #include <linux/counter.h> |
12 | #include <linux/kernel.h> | |
13 | #include <linux/module.h> | |
14 | #include <linux/mutex.h> | |
15 | #include <linux/pci.h> | |
16 | #include <linux/pm_runtime.h> | |
17 | ||
/* Memory-mapped register offsets (BAR 0) */
#define INTEL_QEPCON			0x00
#define INTEL_QEPFLT			0x04
#define INTEL_QEPCOUNT			0x08
#define INTEL_QEPMAX			0x0c
#define INTEL_QEPWDT			0x10
#define INTEL_QEPCAPDIV			0x14
#define INTEL_QEPCNTR			0x18
#define INTEL_QEPCAPBUF			0x1c
#define INTEL_QEPINT_STAT		0x20
#define INTEL_QEPINT_MASK		0x24

/* QEPCON (control register) bits */
#define INTEL_QEPCON_EN			BIT(0)
#define INTEL_QEPCON_FLT_EN		BIT(1)
#define INTEL_QEPCON_EDGE_A		BIT(2)
#define INTEL_QEPCON_EDGE_B		BIT(3)
#define INTEL_QEPCON_EDGE_INDX		BIT(4)
#define INTEL_QEPCON_SWPAB		BIT(5)
#define INTEL_QEPCON_OP_MODE		BIT(6)
#define INTEL_QEPCON_PH_ERR		BIT(7)
#define INTEL_QEPCON_COUNT_RST_MODE	BIT(8)
#define INTEL_QEPCON_INDX_GATING_MASK	GENMASK(10, 9)
#define INTEL_QEPCON_INDX_GATING(n)	(((n) & 3) << 9)
#define INTEL_QEPCON_INDX_PAL_PBL	INTEL_QEPCON_INDX_GATING(0)
#define INTEL_QEPCON_INDX_PAL_PBH	INTEL_QEPCON_INDX_GATING(1)
#define INTEL_QEPCON_INDX_PAH_PBL	INTEL_QEPCON_INDX_GATING(2)
#define INTEL_QEPCON_INDX_PAH_PBH	INTEL_QEPCON_INDX_GATING(3)
#define INTEL_QEPCON_CAP_MODE		BIT(11)
#define INTEL_QEPCON_FIFO_THRE_MASK	GENMASK(14, 12)
#define INTEL_QEPCON_FIFO_THRE(n)	((((n) - 1) & 7) << 12)
#define INTEL_QEPCON_FIFO_EMPTY		BIT(15)

/* QEPFLT: spike filter max count field occupies the low 21 bits */
#define INTEL_QEPFLT_MAX_COUNT(n)	((n) & 0x1fffff)

/* QEPINT (interrupt status/mask) bits */
#define INTEL_QEPINT_FIFOCRIT		BIT(5)
#define INTEL_QEPINT_FIFOENTRY		BIT(4)
#define INTEL_QEPINT_QEPDIR		BIT(3)
#define INTEL_QEPINT_QEPRST_UP		BIT(2)
#define INTEL_QEPINT_QEPRST_DOWN	BIT(1)
#define INTEL_QEPINT_WDT		BIT(0)

#define INTEL_QEPINT_MASK_ALL		GENMASK(5, 0)

/* Peripheral clock period, used to convert filter counts to nanoseconds */
#define INTEL_QEP_CLK_PERIOD_NS		10

/* Build a read/write counter_count_ext entry from an attribute name */
#define INTEL_QEP_COUNTER_EXT_RW(_name) \
{ \
	.name = #_name, \
	.read = _name##_read, \
	.write = _name##_write, \
}
71 | ||
struct intel_qep {
	struct counter_device counter;
	/* Serializes the sysfs write handlers against the enabled state */
	struct mutex lock;
	struct device *dev;
	void __iomem *regs;
	/* Mirrors the INTEL_QEPCON_EN state set via the enable attribute */
	bool enabled;
	/* Context save registers */
	u32 qepcon;
	u32 qepflt;
	u32 qepmax;
};
83 | ||
84 | static inline u32 intel_qep_readl(struct intel_qep *qep, u32 offset) | |
85 | { | |
86 | return readl(qep->regs + offset); | |
87 | } | |
88 | ||
89 | static inline void intel_qep_writel(struct intel_qep *qep, | |
90 | u32 offset, u32 value) | |
91 | { | |
92 | writel(value, qep->regs + offset); | |
93 | } | |
94 | ||
/*
 * Bring the peripheral to a known state: counting disabled, quadrature
 * operation mode, spike filter off, all edges active and every interrupt
 * masked.
 */
static void intel_qep_init(struct intel_qep *qep)
{
	u32 reg;

	reg = intel_qep_readl(qep, INTEL_QEPCON);
	reg &= ~INTEL_QEPCON_EN;
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	qep->enabled = false;
	/*
	 * Make sure peripheral is disabled by flushing the write with
	 * a dummy read
	 */
	reg = intel_qep_readl(qep, INTEL_QEPCON);

	/*
	 * NOTE(review): COUNT_RST_MODE set here matches "preset disabled"
	 * as exposed by the preset_enable attribute below — confirm the
	 * reset source semantics against the datasheet.
	 */
	reg &= ~(INTEL_QEPCON_OP_MODE | INTEL_QEPCON_FLT_EN);
	reg |= INTEL_QEPCON_EDGE_A | INTEL_QEPCON_EDGE_B |
	       INTEL_QEPCON_EDGE_INDX | INTEL_QEPCON_COUNT_RST_MODE;
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
}
115 | ||
116 | static int intel_qep_count_read(struct counter_device *counter, | |
117 | struct counter_count *count, | |
118 | unsigned long *val) | |
119 | { | |
120 | struct intel_qep *const qep = counter->priv; | |
121 | ||
122 | pm_runtime_get_sync(qep->dev); | |
123 | *val = intel_qep_readl(qep, INTEL_QEPCOUNT); | |
124 | pm_runtime_put(qep->dev); | |
125 | ||
126 | return 0; | |
127 | } | |
128 | ||
/* The hardware is driven in a single mode: x4 quadrature */
static const enum counter_function intel_qep_count_functions[] = {
	COUNTER_FUNCTION_QUADRATURE_X4,
};
132 | ||
static int intel_qep_function_get(struct counter_device *counter,
				  struct counter_count *count,
				  size_t *function)
{
	/* Only one entry in intel_qep_count_functions, so always index 0 */
	*function = 0;

	return 0;
}
141 | ||
/* Every signal triggers on both edges; no other action is selectable */
static const enum counter_synapse_action intel_qep_synapse_actions[] = {
	COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};
145 | ||
static int intel_qep_action_get(struct counter_device *counter,
				struct counter_count *count,
				struct counter_synapse *synapse,
				size_t *action)
{
	/* Only one entry in intel_qep_synapse_actions, so always index 0 */
	*action = 0;
	return 0;
}
154 | ||
/* Counter subsystem callbacks implemented by this driver */
static const struct counter_ops intel_qep_counter_ops = {
	.count_read = intel_qep_count_read,
	.function_get = intel_qep_function_get,
	.action_get = intel_qep_action_get,
};
160 | ||
#define INTEL_QEP_SIGNAL(_id, _name) { \
	.id = (_id), \
	.name = (_name), \
}

/* Input signals: the two quadrature phases and the index pulse */
static struct counter_signal intel_qep_signals[] = {
	INTEL_QEP_SIGNAL(0, "Phase A"),
	INTEL_QEP_SIGNAL(1, "Phase B"),
	INTEL_QEP_SIGNAL(2, "Index"),
};

#define INTEL_QEP_SYNAPSE(_signal_id) { \
	.actions_list = intel_qep_synapse_actions, \
	.num_actions = ARRAY_SIZE(intel_qep_synapse_actions), \
	.signal = &intel_qep_signals[(_signal_id)], \
}

/* One synapse per signal, all sharing the both-edges action list */
static struct counter_synapse intel_qep_count_synapses[] = {
	INTEL_QEP_SYNAPSE(0),
	INTEL_QEP_SYNAPSE(1),
	INTEL_QEP_SYNAPSE(2),
};
183 | ||
184 | static ssize_t ceiling_read(struct counter_device *counter, | |
185 | struct counter_count *count, | |
186 | void *priv, char *buf) | |
187 | { | |
188 | struct intel_qep *qep = counter->priv; | |
189 | u32 reg; | |
190 | ||
191 | pm_runtime_get_sync(qep->dev); | |
192 | reg = intel_qep_readl(qep, INTEL_QEPMAX); | |
193 | pm_runtime_put(qep->dev); | |
194 | ||
195 | return sysfs_emit(buf, "%u\n", reg); | |
196 | } | |
197 | ||
/*
 * Set the counter ceiling (QEPMAX). Rejected with -EBUSY while the
 * counter is enabled because QEPMAX is only writable when disabled.
 */
static ssize_t ceiling_write(struct counter_device *counter,
			     struct counter_count *count,
			     void *priv, const char *buf, size_t len)
{
	struct intel_qep *qep = counter->priv;
	u32 max;
	int ret;

	ret = kstrtou32(buf, 0, &max);
	if (ret < 0)
		return ret;

	/* Lock taken before the enabled check so enable_write() can't race */
	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	intel_qep_writel(qep, INTEL_QEPMAX, max);
	pm_runtime_put(qep->dev);
	ret = len;

out:
	mutex_unlock(&qep->lock);
	return ret;
}
225 | ||
226 | static ssize_t enable_read(struct counter_device *counter, | |
227 | struct counter_count *count, | |
228 | void *priv, char *buf) | |
229 | { | |
230 | struct intel_qep *qep = counter->priv; | |
231 | ||
232 | return sysfs_emit(buf, "%u\n", qep->enabled); | |
233 | } | |
234 | ||
/*
 * Enable or disable the counter. While enabled, an extra runtime PM
 * reference is held (get_noresume) so the device cannot suspend and
 * lose counts; it is dropped (put_noidle) on disable. The paired
 * get_sync/put around the register access supplies the actual resume.
 */
static ssize_t enable_write(struct counter_device *counter,
			    struct counter_count *count,
			    void *priv, const char *buf, size_t len)
{
	struct intel_qep *qep = counter->priv;
	u32 reg;
	bool val, changed;
	int ret;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	mutex_lock(&qep->lock);
	/* Nothing to do if the requested state is already current */
	changed = val ^ qep->enabled;
	if (!changed)
		goto out;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (val) {
		/* Enable peripheral and keep runtime PM always on */
		reg |= INTEL_QEPCON_EN;
		pm_runtime_get_noresume(qep->dev);
	} else {
		/* Let runtime PM be idle and disable peripheral */
		pm_runtime_put_noidle(qep->dev);
		reg &= ~INTEL_QEPCON_EN;
	}
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);
	qep->enabled = val;

out:
	mutex_unlock(&qep->lock);
	return len;
}
272 | ||
/*
 * Report the spike filter length in nanoseconds, 0 when the filter is
 * disabled. Hardware filter length is (QEPFLT max count + 2) clock
 * periods, hence the "+ 2" below.
 */
static ssize_t spike_filter_ns_read(struct counter_device *counter,
				    struct counter_count *count,
				    void *priv, char *buf)
{
	struct intel_qep *qep = counter->priv;
	u32 reg;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (!(reg & INTEL_QEPCON_FLT_EN)) {
		/* Early exit must still drop the runtime PM reference */
		pm_runtime_put(qep->dev);
		return sysfs_emit(buf, "0\n");
	}
	reg = INTEL_QEPFLT_MAX_COUNT(intel_qep_readl(qep, INTEL_QEPFLT));
	pm_runtime_put(qep->dev);

	return sysfs_emit(buf, "%u\n", (reg + 2) * INTEL_QEP_CLK_PERIOD_NS);
}
291 | ||
/*
 * Configure the spike filter from a length in nanoseconds. 0 disables
 * the filter; values shorter than 2 clock periods are invalid; lengths
 * exceeding the 21-bit QEPFLT field return -ERANGE. Rejected with
 * -EBUSY while the counter is enabled.
 */
static ssize_t spike_filter_ns_write(struct counter_device *counter,
				     struct counter_count *count,
				     void *priv, const char *buf, size_t len)
{
	struct intel_qep *qep = counter->priv;
	u32 reg, length;
	bool enable;
	int ret;

	ret = kstrtou32(buf, 0, &length);
	if (ret < 0)
		return ret;

	/*
	 * Spike filter length is (MAX_COUNT + 2) clock periods.
	 * Disable filter when userspace writes 0, enable for valid
	 * nanoseconds values and error out otherwise.
	 */
	length /= INTEL_QEP_CLK_PERIOD_NS;
	if (length == 0) {
		enable = false;
		length = 0;
	} else if (length >= 2) {
		enable = true;
		length -= 2;
	} else {
		return -EINVAL;
	}

	/* Any bits above the 21-bit MAX_COUNT field mean out of range */
	if (length > INTEL_QEPFLT_MAX_COUNT(length))
		return -ERANGE;

	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (enable)
		reg |= INTEL_QEPCON_FLT_EN;
	else
		reg &= ~INTEL_QEPCON_FLT_EN;
	intel_qep_writel(qep, INTEL_QEPFLT, length);
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);
	ret = len;

out:
	mutex_unlock(&qep->lock);
	return ret;
}
345 | ||
346 | static ssize_t preset_enable_read(struct counter_device *counter, | |
347 | struct counter_count *count, | |
348 | void *priv, char *buf) | |
349 | { | |
350 | struct intel_qep *qep = counter->priv; | |
351 | u32 reg; | |
352 | ||
353 | pm_runtime_get_sync(qep->dev); | |
354 | reg = intel_qep_readl(qep, INTEL_QEPCON); | |
355 | pm_runtime_put(qep->dev); | |
356 | return sysfs_emit(buf, "%u\n", !(reg & INTEL_QEPCON_COUNT_RST_MODE)); | |
357 | } | |
358 | ||
/*
 * Enable/disable the preset function by clearing/setting the
 * COUNT_RST_MODE bit (note the inverted polarity). Rejected with
 * -EBUSY while the counter is enabled since QEPCON mode bits are only
 * writable when disabled.
 */
static ssize_t preset_enable_write(struct counter_device *counter,
				   struct counter_count *count,
				   void *priv, const char *buf, size_t len)
{
	struct intel_qep *qep = counter->priv;
	u32 reg;
	bool val;
	int ret;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (val)
		reg &= ~INTEL_QEPCON_COUNT_RST_MODE;
	else
		reg |= INTEL_QEPCON_COUNT_RST_MODE;

	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);
	ret = len;

out:
	mutex_unlock(&qep->lock);

	return ret;
}
394 | ||
395 | static const struct counter_count_ext intel_qep_count_ext[] = { | |
396 | INTEL_QEP_COUNTER_EXT_RW(ceiling), | |
397 | INTEL_QEP_COUNTER_EXT_RW(enable), | |
398 | INTEL_QEP_COUNTER_EXT_RW(spike_filter_ns), | |
399 | INTEL_QEP_COUNTER_EXT_RW(preset_enable) | |
400 | }; | |
401 | ||
/* The single count exposed by the device, wired to its tables above */
static struct counter_count intel_qep_counter_count[] = {
	{
		.id = 0,
		.name = "Channel 1 Count",
		.functions_list = intel_qep_count_functions,
		.num_functions = ARRAY_SIZE(intel_qep_count_functions),
		.synapses = intel_qep_count_synapses,
		.num_synapses = ARRAY_SIZE(intel_qep_count_synapses),
		.ext = intel_qep_count_ext,
		.num_ext = ARRAY_SIZE(intel_qep_count_ext),
	},
};
414 | ||
/*
 * PCI probe: map BAR 0, reset the peripheral, populate the counter
 * device and register it. All resources are device-managed, so there
 * is no explicit error unwinding beyond returning the error code.
 */
static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct intel_qep *qep;
	struct device *dev = &pci->dev;
	void __iomem *regs;
	int ret;

	qep = devm_kzalloc(dev, sizeof(*qep), GFP_KERNEL);
	if (!qep)
		return -ENOMEM;

	ret = pcim_enable_device(pci);
	if (ret)
		return ret;

	pci_set_master(pci);

	/* Only BAR 0 carries the QEP register block */
	ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
	if (ret)
		return ret;

	regs = pcim_iomap_table(pci)[0];
	if (!regs)
		return -ENOMEM;

	qep->dev = dev;
	qep->regs = regs;
	mutex_init(&qep->lock);

	intel_qep_init(qep);
	pci_set_drvdata(pci, qep);

	qep->counter.name = pci_name(pci);
	qep->counter.parent = dev;
	qep->counter.ops = &intel_qep_counter_ops;
	qep->counter.counts = intel_qep_counter_count;
	qep->counter.num_counts = ARRAY_SIZE(intel_qep_counter_count);
	qep->counter.signals = intel_qep_signals;
	qep->counter.num_signals = ARRAY_SIZE(intel_qep_signals);
	qep->counter.priv = qep;
	qep->enabled = false;

	/*
	 * NOTE(review): this put appears to pair with the usage count the
	 * PCI/PM core holds during probe, letting the device idle until
	 * used — confirm against the runtime PM documentation.
	 */
	pm_runtime_put(dev);
	pm_runtime_allow(dev);

	return devm_counter_register(&pci->dev, &qep->counter);
}
462 | ||
/*
 * PCI remove: make sure the device is powered, then disable the
 * peripheral by clearing QEPCON. Mapped regions and the counter
 * registration are torn down by devm.
 */
static void intel_qep_remove(struct pci_dev *pci)
{
	struct intel_qep *qep = pci_get_drvdata(pci);
	struct device *dev = &pci->dev;

	pm_runtime_forbid(dev);
	/*
	 * When the counter is enabled, enable_write() already holds an
	 * extra usage-count reference, so only take one here otherwise.
	 */
	if (!qep->enabled)
		pm_runtime_get(dev);

	intel_qep_writel(qep, INTEL_QEPCON, 0);
}
474 | ||
ac3bd9d6 | 475 | static int __maybe_unused intel_qep_suspend(struct device *dev) |
b711f687 | 476 | { |
93466212 | 477 | struct pci_dev *pdev = to_pci_dev(dev); |
b711f687 JN |
478 | struct intel_qep *qep = pci_get_drvdata(pdev); |
479 | ||
480 | qep->qepcon = intel_qep_readl(qep, INTEL_QEPCON); | |
481 | qep->qepflt = intel_qep_readl(qep, INTEL_QEPFLT); | |
482 | qep->qepmax = intel_qep_readl(qep, INTEL_QEPMAX); | |
483 | ||
484 | return 0; | |
485 | } | |
486 | ||
/*
 * Restore the context saved by intel_qep_suspend(). The restore order
 * is deliberate: several registers and QEPCON mode bits are writable
 * only while the peripheral is disabled, so EN is restored last.
 */
static int __maybe_unused intel_qep_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_qep *qep = pci_get_drvdata(pdev);

	/*
	 * Make sure peripheral is disabled when restoring registers and
	 * control register bits that are writable only when the peripheral
	 * is disabled
	 */
	intel_qep_writel(qep, INTEL_QEPCON, 0);
	/* Dummy read flushes the disable before the writes below */
	intel_qep_readl(qep, INTEL_QEPCON);

	intel_qep_writel(qep, INTEL_QEPFLT, qep->qepflt);
	intel_qep_writel(qep, INTEL_QEPMAX, qep->qepmax);
	intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);

	/* Restore all other control register bits except enable status */
	intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon & ~INTEL_QEPCON_EN);
	intel_qep_readl(qep, INTEL_QEPCON);

	/* Restore enable status */
	intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon);

	return 0;
}
/* Same suspend/resume handlers serve both system and runtime PM */
static UNIVERSAL_DEV_PM_OPS(intel_qep_pm_ops,
			    intel_qep_suspend, intel_qep_resume, NULL);

static const struct pci_device_id intel_qep_id_table[] = {
	/* EHL */
	{ PCI_VDEVICE(INTEL, 0x4bc3), },
	{ PCI_VDEVICE(INTEL, 0x4b81), },
	{ PCI_VDEVICE(INTEL, 0x4b82), },
	{ PCI_VDEVICE(INTEL, 0x4b83), },
	{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, intel_qep_id_table);

static struct pci_driver intel_qep_driver = {
	.name = "intel-qep",
	.id_table = intel_qep_id_table,
	.probe = intel_qep_probe,
	.remove = intel_qep_remove,
	.driver = {
		.pm = &intel_qep_pm_ops,
	}
};

module_pci_driver(intel_qep_driver);

MODULE_AUTHOR("Felipe Balbi (Intel)");
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");