/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/irq.h>
#include "rmi_driver.h"

#define RMI_SPI_DEFAULT_XFER_BUF_SIZE	64

#define RMI_PAGE_SELECT_REGISTER	0x00FF
#define RMI_SPI_PAGE(addr)		(((addr) >> 8) & 0x80)
#define RMI_SPI_XFER_SIZE_LIMIT		255
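
/*
 * The 16-bit register address goes out as two bytes, high byte first;
 * bit 7 of the high byte is the read flag (see rmi_spi_xfer() below).
 * Writing the page select register, at offset 0xFF of every page, moves
 * the 256-register addressing window.
 */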

#define BUFFER_SIZE_INCREMENT	32

enum rmi_spi_op {
	RMI_SPI_WRITE = 0,
	RMI_SPI_READ,
	RMI_SPI_V2_READ_UNIFIED,
	RMI_SPI_V2_READ_SPLIT,
	RMI_SPI_V2_WRITE,
};
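
/*
 * Only RMI_SPI_WRITE and RMI_SPI_READ are fully handled below: the V2 ops
 * reserve a four-byte command header, but the V2 read variants do not fill
 * one in yet.
 */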

struct rmi_spi_cmd {
	enum rmi_spi_op op;
	u16 addr;
};

struct rmi_spi_xport {
	struct rmi_transport_dev xport;
	struct spi_device *spi;

	struct mutex page_mutex;
	int page;

	int irq;

	u8 *rx_buf;
	u8 *tx_buf;
	int xfer_buf_size;

	struct spi_transfer *rx_xfers;
	struct spi_transfer *tx_xfers;
	int rx_xfer_count;
	int tx_xfer_count;
};

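/*
 * rmi_spi_manage_pools - (re)size the DMA-safe transfer buffers
 * @rmi_spi: the transport instance
 * @len: minimum number of bytes the buffers must hold
 *
 * The rx and tx buffers are carved out of a single allocation that doubles
 * in size until @len fits, capped at RMI_SPI_XFER_SIZE_LIMIT. If the
 * platform data asks for per-byte read or write delays, one spi_transfer
 * is pre-allocated per buffered byte so that every byte can carry its own
 * delay.
 */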
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
	struct spi_device *spi = rmi_spi->spi;
	int buf_size = rmi_spi->xfer_buf_size
		? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
	struct spi_transfer *xfer_buf;
	void *buf;
	void *tmp;

	while (buf_size < len)
		buf_size *= 2;

	if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
		buf_size = RMI_SPI_XFER_SIZE_LIMIT;

	tmp = rmi_spi->rx_buf;
	buf = devm_kzalloc(&spi->dev, buf_size * 2,
			   GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	rmi_spi->rx_buf = buf;
	rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
	rmi_spi->xfer_buf_size = buf_size;

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	if (rmi_spi->xport.pdata.spi_data.read_delay_us)
		rmi_spi->rx_xfer_count = buf_size;
	else
		rmi_spi->rx_xfer_count = 1;

	if (rmi_spi->xport.pdata.spi_data.write_delay_us)
		rmi_spi->tx_xfer_count = buf_size;
	else
		rmi_spi->tx_xfer_count = 1;

	/*
	 * Allocate a pool of spi_transfer buffers for devices which need
	 * per byte delays.
	 */
	tmp = rmi_spi->rx_xfers;
	xfer_buf = devm_kzalloc(&spi->dev,
		(rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count)
		* sizeof(struct spi_transfer), GFP_KERNEL);
	if (!xfer_buf)
		return -ENOMEM;

	rmi_spi->rx_xfers = xfer_buf;
	rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	return 0;
}

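/*
 * rmi_spi_xfer - run one RMI command over the bus
 * @rmi_spi: the transport instance
 * @cmd: operation and target register address
 * @tx_buf: optional payload to send after the command header
 * @tx_len: payload length in bytes
 * @rx_buf: optional buffer for the response
 * @rx_len: number of bytes to read back
 *
 * Builds a two-byte (V1) or four-byte (V2) command header ahead of the
 * payload and issues the whole message with spi_sync(). When per-byte
 * delays are configured, the message is split into one spi_transfer per
 * byte so the controller inserts the requested inter-byte delay.
 */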
static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
			const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
			int tx_len, u8 *rx_buf, int rx_len)
{
	struct spi_device *spi = rmi_spi->spi;
	struct rmi_device_platform_data_spi *spi_data =
					&rmi_spi->xport.pdata.spi_data;
	struct spi_message msg;
	struct spi_transfer *xfer;
	int ret = 0;
	int len;
	int cmd_len = 0;
	int total_tx_len;
	int i;
	u16 addr = cmd->addr;

	spi_message_init(&msg);

	switch (cmd->op) {
	case RMI_SPI_WRITE:
	case RMI_SPI_READ:
		cmd_len += 2;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
	case RMI_SPI_V2_READ_SPLIT:
	case RMI_SPI_V2_WRITE:
		cmd_len += 4;
		break;
	}

	total_tx_len = cmd_len + tx_len;
	len = max(total_tx_len, rx_len);

	if (len > RMI_SPI_XFER_SIZE_LIMIT)
		return -EINVAL;

	if (rmi_spi->xfer_buf_size < len) {
		ret = rmi_spi_manage_pools(rmi_spi, len);
		if (ret < 0)
			return ret;
	}

	if (addr == 0)
		/*
		 * SPI needs an address. Use 0x7FF if we want to keep
		 * reading from the last position of the register pointer.
		 */
		addr = 0x7FF;

	switch (cmd->op) {
	case RMI_SPI_WRITE:
		rmi_spi->tx_buf[0] = (addr >> 8);
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_READ:
		rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
		break;
	case RMI_SPI_V2_READ_SPLIT:
		break;
	case RMI_SPI_V2_WRITE:
		rmi_spi->tx_buf[0] = 0x40;
		rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
		rmi_spi->tx_buf[2] = addr & 0xFF;
		rmi_spi->tx_buf[3] = tx_len;
		break;
	}

	if (tx_buf)
		memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);

	if (rmi_spi->tx_xfer_count > 1) {
		for (i = 0; i < total_tx_len; i++) {
			xfer = &rmi_spi->tx_xfers[i];
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->tx_buf = &rmi_spi->tx_buf[i];
			xfer->len = 1;
			xfer->delay_usecs = spi_data->write_delay_us;
			spi_message_add_tail(xfer, &msg);
		}
	} else {
		xfer = rmi_spi->tx_xfers;
		memset(xfer, 0, sizeof(struct spi_transfer));
		xfer->tx_buf = rmi_spi->tx_buf;
		xfer->len = total_tx_len;
		spi_message_add_tail(xfer, &msg);
	}

	rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
		__func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
		total_tx_len, total_tx_len, rmi_spi->tx_buf);

	if (rx_buf) {
		if (rmi_spi->rx_xfer_count > 1) {
			for (i = 0; i < rx_len; i++) {
				xfer = &rmi_spi->rx_xfers[i];
				memset(xfer, 0, sizeof(struct spi_transfer));
				xfer->rx_buf = &rmi_spi->rx_buf[i];
				xfer->len = 1;
				xfer->delay_usecs = spi_data->read_delay_us;
				spi_message_add_tail(xfer, &msg);
			}
		} else {
			xfer = rmi_spi->rx_xfers;
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->rx_buf = rmi_spi->rx_buf;
			xfer->len = rx_len;
			spi_message_add_tail(xfer, &msg);
		}
	}

	ret = spi_sync(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
		return ret;
	}

	if (rx_buf) {
		memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
		rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
			__func__, rx_len, rx_len, rx_buf);
	}

	return 0;
}

/*
 * rmi_set_page - Set RMI page
 * @rmi_spi: The pointer to the rmi_spi_xport struct.
 * @page: The new page address.
 *
 * RMI devices have 16-bit addressing, but some of the transport
 * implementations (like SMBus) only have 8-bit addressing. So RMI implements
 * a page select register at 0xFF of every page, which lets the device be
 * re-addressed reliably every 256 registers.
 *
 * The page_mutex lock must be held when this function is entered.
 *
 * Returns zero on success, non-zero on failure.
 */
static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
{
	struct rmi_spi_cmd cmd;
	int ret;

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = RMI_PAGE_SELECT_REGISTER;

	ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);

	/* Only cache the new page once the write actually succeeded. */
	if (!ret)
		rmi_spi->page = page;

	return ret;
}

static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
			       const void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
			      void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_READ;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

static const struct rmi_transport_ops rmi_spi_ops = {
	.write_block	= rmi_spi_write_block,
	.read_block	= rmi_spi_read_block,
};

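/*
 * Attention IRQ handler. It runs in thread context (the hard handler is
 * NULL and the IRQ is requested with IRQF_ONESHOT) because the SPI
 * transfers triggered by rmi_process_interrupt_requests() can sleep.
 */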
static irqreturn_t rmi_spi_irq(int irq, void *dev_id)
{
	struct rmi_spi_xport *rmi_spi = dev_id;
	struct rmi_device *rmi_dev = rmi_spi->xport.rmi_dev;
	int ret;

	ret = rmi_process_interrupt_requests(rmi_dev);
	if (ret)
		rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
			"Failed to process interrupt request: %d\n", ret);

	return IRQ_HANDLED;
}

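/*
 * Request the attention IRQ. If the platform did not describe a trigger
 * type, fall back to level-low, a common default for RMI4 ATTN lines.
 */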
static int rmi_spi_init_irq(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_spi->irq));
	int ret;

	if (!irq_flags)
		irq_flags = IRQF_TRIGGER_LOW;

	ret = devm_request_threaded_irq(&spi->dev, rmi_spi->irq, NULL,
			rmi_spi_irq, irq_flags | IRQF_ONESHOT,
			dev_name(&spi->dev), rmi_spi);
	if (ret < 0) {
		dev_warn(&spi->dev, "Failed to register interrupt %d\n",
			 rmi_spi->irq);
		return ret;
	}

	return 0;
}

static int rmi_spi_probe(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi;
	struct rmi_device_platform_data *pdata;
	struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
	int retval;

	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
			GFP_KERNEL);
	if (!rmi_spi)
		return -ENOMEM;

	pdata = &rmi_spi->xport.pdata;

	if (spi_pdata)
		*pdata = *spi_pdata;

	if (pdata->spi_data.bits_per_word)
		spi->bits_per_word = pdata->spi_data.bits_per_word;

	if (pdata->spi_data.mode)
		spi->mode = pdata->spi_data.mode;

	retval = spi_setup(spi);
	if (retval < 0) {
		dev_err(&spi->dev, "spi_setup failed!\n");
		return retval;
	}

	if (spi->irq > 0)
		rmi_spi->irq = spi->irq;

	rmi_spi->spi = spi;
	mutex_init(&rmi_spi->page_mutex);

	rmi_spi->xport.dev = &spi->dev;
	rmi_spi->xport.proto_name = "spi";
	rmi_spi->xport.ops = &rmi_spi_ops;

	spi_set_drvdata(spi, rmi_spi);

	retval = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
	if (retval)
		return retval;

	/*
	 * Setting the page to zero will (a) make sure the PSR is in a
	 * known state, and (b) make sure we can talk to the device.
	 */
	retval = rmi_set_page(rmi_spi, 0);
	if (retval) {
		dev_err(&spi->dev, "Failed to set page select to 0.\n");
		return retval;
	}

	retval = rmi_register_transport_device(&rmi_spi->xport);
	if (retval) {
		dev_err(&spi->dev, "failed to register transport.\n");
		return retval;
	}

	retval = rmi_spi_init_irq(spi);
	if (retval < 0)
		return retval;

	dev_info(&spi->dev, "registered RMI SPI driver\n");
	return 0;
}

static int rmi_spi_remove(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);

	rmi_unregister_transport_device(&rmi_spi->xport);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
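/*
 * System sleep: quiesce the RMI core first, then mask the attention IRQ;
 * if the device may wake the system, arm the IRQ as a wakeup source.
 */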
static int rmi_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	disable_irq(rmi_spi->irq);
	if (device_may_wakeup(&spi->dev)) {
		ret = enable_irq_wake(rmi_spi->irq);
		if (ret)
			dev_warn(dev, "Failed to enable irq for wake: %d\n",
				 ret);
	}
	return ret;
}

static int rmi_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	enable_irq(rmi_spi->irq);
	if (device_may_wakeup(&spi->dev)) {
		ret = disable_irq_wake(rmi_spi->irq);
		if (ret)
			dev_warn(dev, "Failed to disable irq for wake: %d\n",
				 ret);
	}

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return ret;
}
#endif

#ifdef CONFIG_PM
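/*
 * Runtime PM mirrors the system-sleep path, minus the wakeup-source
 * handling: suspend or resume the RMI core and gate the attention IRQ.
 */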
static int rmi_spi_runtime_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	disable_irq(rmi_spi->irq);

	return 0;
}

static int rmi_spi_runtime_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	enable_irq(rmi_spi->irq);

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return 0;
}
#endif

static const struct dev_pm_ops rmi_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
	SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
			   NULL)
};

static const struct spi_device_id rmi_id[] = {
	{ "rmi4_spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, rmi_id);
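
/*
 * Usage sketch (hypothetical board code, not part of this driver): a
 * board file would bind a device to this driver through the "rmi4_spi"
 * modalias. The bus number, chip select, speed and delay values below
 * are illustrative assumptions, not requirements.
 *
 *	static struct rmi_device_platform_data rmi_pdata = {
 *		.spi_data = {
 *			.read_delay_us	= 10,
 *			.write_delay_us	= 10,
 *		},
 *	};
 *
 *	static struct spi_board_info rmi_board_info __initdata = {
 *		.modalias	= "rmi4_spi",
 *		.max_speed_hz	= 1000000,
 *		.bus_num	= 1,
 *		.chip_select	= 0,
 *		.platform_data	= &rmi_pdata,
 *	};
 *
 *	spi_register_board_info(&rmi_board_info, 1);
 */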

static struct spi_driver rmi_spi_driver = {
	.driver = {
		.name	= "rmi4_spi",
		.pm	= &rmi_spi_pm,
	},
	.id_table	= rmi_id,
	.probe		= rmi_spi_probe,
	.remove		= rmi_spi_remove,
};

module_spi_driver(rmi_spi_driver);

MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(RMI_DRIVER_VERSION);