]>
Commit | Line | Data |
---|---|---|
8d99758d AD |
1 | /* |
2 | * Copyright (c) 2011-2016 Synaptics Incorporated | |
3 | * Copyright (c) 2011 Unixphere | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License version 2 as published by | |
7 | * the Free Software Foundation. | |
8 | */ | |
9 | ||
10 | #include <linux/kernel.h> | |
11 | #include <linux/module.h> | |
12 | #include <linux/rmi.h> | |
13 | #include <linux/slab.h> | |
14 | #include <linux/spi/spi.h> | |
15 | #include <linux/irq.h> | |
48147b97 | 16 | #include <linux/of.h> |
8d99758d AD |
17 | #include "rmi_driver.h" |
18 | ||
/* Initial size (bytes) of the rx/tx byte pools; grown on demand. */
#define RMI_SPI_DEFAULT_XFER_BUF_SIZE 64

/* Address of the page select register present in every RMI page. */
#define RMI_PAGE_SELECT_REGISTER 0x00FF
/* Page-select value derived from a 16-bit RMI register address. */
#define RMI_SPI_PAGE(addr) (((addr) >> 8) & 0x80)
/* Hard cap on a single SPI transfer, in bytes. */
#define RMI_SPI_XFER_SIZE_LIMIT 255

/* NOTE(review): unused in the code visible in this file — confirm before removing. */
#define BUFFER_SIZE_INCREMENT 32
26 | ||
/* Opcodes encoded into the command header that prefixes each transfer. */
enum rmi_spi_op {
	RMI_SPI_WRITE = 0,
	RMI_SPI_READ,
	RMI_SPI_V2_READ_UNIFIED,
	RMI_SPI_V2_READ_SPLIT,
	RMI_SPI_V2_WRITE,
};
34 | ||
/* One RMI SPI command: the operation and the target register address. */
struct rmi_spi_cmd {
	enum rmi_spi_op op;
	u16 addr;
};
39 | ||
/* Per-device state for the RMI4 SPI transport. */
struct rmi_spi_xport {
	struct rmi_transport_dev xport;	/* generic RMI transport handle */
	struct spi_device *spi;

	struct mutex page_mutex;	/* serializes page select + transfer */
	int page;			/* page currently cached as selected */

	int irq;

	/* DMA-safe byte pools; tx_buf points into the rx_buf allocation. */
	u8 *rx_buf;
	u8 *tx_buf;
	int xfer_buf_size;

	/* spi_transfer pools, sized per-byte when delays are required. */
	struct spi_transfer *rx_xfers;
	struct spi_transfer *tx_xfers;
	int rx_xfer_count;
	int tx_xfer_count;
};
58 | ||
59 | static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len) | |
60 | { | |
61 | struct spi_device *spi = rmi_spi->spi; | |
62 | int buf_size = rmi_spi->xfer_buf_size | |
63 | ? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE; | |
64 | struct spi_transfer *xfer_buf; | |
65 | void *buf; | |
66 | void *tmp; | |
67 | ||
68 | while (buf_size < len) | |
69 | buf_size *= 2; | |
70 | ||
71 | if (buf_size > RMI_SPI_XFER_SIZE_LIMIT) | |
72 | buf_size = RMI_SPI_XFER_SIZE_LIMIT; | |
73 | ||
74 | tmp = rmi_spi->rx_buf; | |
75 | buf = devm_kzalloc(&spi->dev, buf_size * 2, | |
76 | GFP_KERNEL | GFP_DMA); | |
77 | if (!buf) | |
78 | return -ENOMEM; | |
79 | ||
80 | rmi_spi->rx_buf = buf; | |
81 | rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size]; | |
82 | rmi_spi->xfer_buf_size = buf_size; | |
83 | ||
84 | if (tmp) | |
85 | devm_kfree(&spi->dev, tmp); | |
86 | ||
87 | if (rmi_spi->xport.pdata.spi_data.read_delay_us) | |
88 | rmi_spi->rx_xfer_count = buf_size; | |
89 | else | |
90 | rmi_spi->rx_xfer_count = 1; | |
91 | ||
92 | if (rmi_spi->xport.pdata.spi_data.write_delay_us) | |
93 | rmi_spi->tx_xfer_count = buf_size; | |
94 | else | |
95 | rmi_spi->tx_xfer_count = 1; | |
96 | ||
97 | /* | |
98 | * Allocate a pool of spi_transfer buffers for devices which need | |
99 | * per byte delays. | |
100 | */ | |
101 | tmp = rmi_spi->rx_xfers; | |
102 | xfer_buf = devm_kzalloc(&spi->dev, | |
103 | (rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count) | |
104 | * sizeof(struct spi_transfer), GFP_KERNEL); | |
105 | if (!xfer_buf) | |
106 | return -ENOMEM; | |
107 | ||
108 | rmi_spi->rx_xfers = xfer_buf; | |
109 | rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count]; | |
110 | ||
111 | if (tmp) | |
112 | devm_kfree(&spi->dev, tmp); | |
113 | ||
114 | return 0; | |
115 | } | |
116 | ||
117 | static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi, | |
118 | const struct rmi_spi_cmd *cmd, const u8 *tx_buf, | |
119 | int tx_len, u8 *rx_buf, int rx_len) | |
120 | { | |
121 | struct spi_device *spi = rmi_spi->spi; | |
122 | struct rmi_device_platform_data_spi *spi_data = | |
123 | &rmi_spi->xport.pdata.spi_data; | |
124 | struct spi_message msg; | |
125 | struct spi_transfer *xfer; | |
126 | int ret = 0; | |
127 | int len; | |
128 | int cmd_len = 0; | |
129 | int total_tx_len; | |
130 | int i; | |
131 | u16 addr = cmd->addr; | |
132 | ||
133 | spi_message_init(&msg); | |
134 | ||
135 | switch (cmd->op) { | |
136 | case RMI_SPI_WRITE: | |
137 | case RMI_SPI_READ: | |
138 | cmd_len += 2; | |
139 | break; | |
140 | case RMI_SPI_V2_READ_UNIFIED: | |
141 | case RMI_SPI_V2_READ_SPLIT: | |
142 | case RMI_SPI_V2_WRITE: | |
143 | cmd_len += 4; | |
144 | break; | |
145 | } | |
146 | ||
147 | total_tx_len = cmd_len + tx_len; | |
148 | len = max(total_tx_len, rx_len); | |
149 | ||
150 | if (len > RMI_SPI_XFER_SIZE_LIMIT) | |
151 | return -EINVAL; | |
152 | ||
153 | if (rmi_spi->xfer_buf_size < len) | |
154 | rmi_spi_manage_pools(rmi_spi, len); | |
155 | ||
156 | if (addr == 0) | |
157 | /* | |
158 | * SPI needs an address. Use 0x7FF if we want to keep | |
159 | * reading from the last position of the register pointer. | |
160 | */ | |
161 | addr = 0x7FF; | |
162 | ||
163 | switch (cmd->op) { | |
164 | case RMI_SPI_WRITE: | |
165 | rmi_spi->tx_buf[0] = (addr >> 8); | |
166 | rmi_spi->tx_buf[1] = addr & 0xFF; | |
167 | break; | |
168 | case RMI_SPI_READ: | |
169 | rmi_spi->tx_buf[0] = (addr >> 8) | 0x80; | |
170 | rmi_spi->tx_buf[1] = addr & 0xFF; | |
171 | break; | |
172 | case RMI_SPI_V2_READ_UNIFIED: | |
173 | break; | |
174 | case RMI_SPI_V2_READ_SPLIT: | |
175 | break; | |
176 | case RMI_SPI_V2_WRITE: | |
177 | rmi_spi->tx_buf[0] = 0x40; | |
178 | rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF; | |
179 | rmi_spi->tx_buf[2] = addr & 0xFF; | |
180 | rmi_spi->tx_buf[3] = tx_len; | |
181 | break; | |
182 | } | |
183 | ||
184 | if (tx_buf) | |
185 | memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len); | |
186 | ||
187 | if (rmi_spi->tx_xfer_count > 1) { | |
188 | for (i = 0; i < total_tx_len; i++) { | |
189 | xfer = &rmi_spi->tx_xfers[i]; | |
190 | memset(xfer, 0, sizeof(struct spi_transfer)); | |
191 | xfer->tx_buf = &rmi_spi->tx_buf[i]; | |
192 | xfer->len = 1; | |
193 | xfer->delay_usecs = spi_data->write_delay_us; | |
194 | spi_message_add_tail(xfer, &msg); | |
195 | } | |
196 | } else { | |
197 | xfer = rmi_spi->tx_xfers; | |
198 | memset(xfer, 0, sizeof(struct spi_transfer)); | |
199 | xfer->tx_buf = rmi_spi->tx_buf; | |
200 | xfer->len = total_tx_len; | |
201 | spi_message_add_tail(xfer, &msg); | |
202 | } | |
203 | ||
204 | rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n", | |
205 | __func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ", | |
206 | total_tx_len, total_tx_len, rmi_spi->tx_buf); | |
207 | ||
208 | if (rx_buf) { | |
209 | if (rmi_spi->rx_xfer_count > 1) { | |
210 | for (i = 0; i < rx_len; i++) { | |
211 | xfer = &rmi_spi->rx_xfers[i]; | |
212 | memset(xfer, 0, sizeof(struct spi_transfer)); | |
213 | xfer->rx_buf = &rmi_spi->rx_buf[i]; | |
214 | xfer->len = 1; | |
215 | xfer->delay_usecs = spi_data->read_delay_us; | |
216 | spi_message_add_tail(xfer, &msg); | |
217 | } | |
218 | } else { | |
219 | xfer = rmi_spi->rx_xfers; | |
220 | memset(xfer, 0, sizeof(struct spi_transfer)); | |
221 | xfer->rx_buf = rmi_spi->rx_buf; | |
222 | xfer->len = rx_len; | |
223 | spi_message_add_tail(xfer, &msg); | |
224 | } | |
225 | } | |
226 | ||
227 | ret = spi_sync(spi, &msg); | |
228 | if (ret < 0) { | |
229 | dev_err(&spi->dev, "spi xfer failed: %d\n", ret); | |
230 | return ret; | |
231 | } | |
232 | ||
233 | if (rx_buf) { | |
234 | memcpy(rx_buf, rmi_spi->rx_buf, rx_len); | |
235 | rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n", | |
236 | __func__, rx_len, rx_len, rx_buf); | |
237 | } | |
238 | ||
239 | return 0; | |
240 | } | |
241 | ||
242 | /* | |
243 | * rmi_set_page - Set RMI page | |
244 | * @xport: The pointer to the rmi_transport_dev struct | |
245 | * @page: The new page address. | |
246 | * | |
247 | * RMI devices have 16-bit addressing, but some of the transport | |
248 | * implementations (like SMBus) only have 8-bit addressing. So RMI implements | |
249 | * a page address at 0xff of every page so we can reliable page addresses | |
250 | * every 256 registers. | |
251 | * | |
252 | * The page_mutex lock must be held when this function is entered. | |
253 | * | |
254 | * Returns zero on success, non-zero on failure. | |
255 | */ | |
256 | static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page) | |
257 | { | |
258 | struct rmi_spi_cmd cmd; | |
259 | int ret; | |
260 | ||
261 | cmd.op = RMI_SPI_WRITE; | |
262 | cmd.addr = RMI_PAGE_SELECT_REGISTER; | |
263 | ||
264 | ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0); | |
265 | ||
266 | if (ret) | |
267 | rmi_spi->page = page; | |
268 | ||
269 | return ret; | |
270 | } | |
271 | ||
272 | static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr, | |
273 | const void *buf, size_t len) | |
274 | { | |
275 | struct rmi_spi_xport *rmi_spi = | |
276 | container_of(xport, struct rmi_spi_xport, xport); | |
277 | struct rmi_spi_cmd cmd; | |
278 | int ret; | |
279 | ||
280 | mutex_lock(&rmi_spi->page_mutex); | |
281 | ||
282 | if (RMI_SPI_PAGE(addr) != rmi_spi->page) { | |
283 | ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr)); | |
284 | if (ret) | |
285 | goto exit; | |
286 | } | |
287 | ||
288 | cmd.op = RMI_SPI_WRITE; | |
289 | cmd.addr = addr; | |
290 | ||
291 | ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0); | |
292 | ||
293 | exit: | |
294 | mutex_unlock(&rmi_spi->page_mutex); | |
295 | return ret; | |
296 | } | |
297 | ||
298 | static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr, | |
299 | void *buf, size_t len) | |
300 | { | |
301 | struct rmi_spi_xport *rmi_spi = | |
302 | container_of(xport, struct rmi_spi_xport, xport); | |
303 | struct rmi_spi_cmd cmd; | |
304 | int ret; | |
305 | ||
306 | mutex_lock(&rmi_spi->page_mutex); | |
307 | ||
308 | if (RMI_SPI_PAGE(addr) != rmi_spi->page) { | |
309 | ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr)); | |
310 | if (ret) | |
311 | goto exit; | |
312 | } | |
313 | ||
314 | cmd.op = RMI_SPI_READ; | |
315 | cmd.addr = addr; | |
316 | ||
317 | ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len); | |
318 | ||
319 | exit: | |
320 | mutex_unlock(&rmi_spi->page_mutex); | |
321 | return ret; | |
322 | } | |
323 | ||
/* Transport operations handed to the RMI core. */
static const struct rmi_transport_ops rmi_spi_ops = {
	.write_block = rmi_spi_write_block,
	.read_block = rmi_spi_read_block,
};
328 | ||
/*
 * Threaded interrupt handler: hand attention interrupts to the RMI core.
 * Failures are only logged; the IRQ is always reported as handled.
 */
static irqreturn_t rmi_spi_irq(int irq, void *dev_id)
{
	struct rmi_spi_xport *rmi_spi = dev_id;
	struct rmi_device *rmi_dev = rmi_spi->xport.rmi_dev;
	int ret;

	ret = rmi_process_interrupt_requests(rmi_dev);
	if (ret)
		rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
			"Failed to process interrupt request: %d\n", ret);

	return IRQ_HANDLED;
}
342 | ||
343 | static int rmi_spi_init_irq(struct spi_device *spi) | |
344 | { | |
345 | struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi); | |
346 | int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_spi->irq)); | |
347 | int ret; | |
348 | ||
349 | if (!irq_flags) | |
350 | irq_flags = IRQF_TRIGGER_LOW; | |
351 | ||
352 | ret = devm_request_threaded_irq(&spi->dev, rmi_spi->irq, NULL, | |
353 | rmi_spi_irq, irq_flags | IRQF_ONESHOT, | |
354 | dev_name(&spi->dev), rmi_spi); | |
355 | if (ret < 0) { | |
356 | dev_warn(&spi->dev, "Failed to register interrupt %d\n", | |
357 | rmi_spi->irq); | |
358 | return ret; | |
359 | } | |
360 | ||
361 | return 0; | |
362 | } | |
363 | ||
48147b97 AD |
#ifdef CONFIG_OF
/*
 * rmi_spi_of_probe - fill platform data from device-tree properties
 * @spi: the SPI device being probed
 * @pdata: platform data to populate
 *
 * Reads the optional per-byte rx/tx delay properties.  Returns zero on
 * success or a negative error code from the property parser.
 */
static int rmi_spi_of_probe(struct spi_device *spi,
			struct rmi_device_platform_data *pdata)
{
	struct device *dev = &spi->dev;
	int retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.read_delay_us,
			"spi-rx-delay-us", 1);
	if (retval)
		return retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.write_delay_us,
			"spi-tx-delay-us", 1);
	if (retval)
		return retval;

	return 0;
}

static const struct of_device_id rmi_spi_of_match[] = {
	{ .compatible = "syna,rmi4-spi" },
	{},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
#else
/* Stub used when the kernel is built without device-tree support. */
static inline int rmi_spi_of_probe(struct spi_device *spi,
				struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif
398 | ||
bbc2ceeb GR |
/* devm action: tear down the transport registered in probe. */
static void rmi_spi_unregister_transport(void *data)
{
	struct rmi_spi_xport *rmi_spi = data;

	rmi_unregister_transport_device(&rmi_spi->xport);
}
405 | ||
8d99758d AD |
/*
 * rmi_spi_probe - bind the RMI4 SPI transport to a device
 * @spi: the SPI device
 *
 * Gathers platform data (device tree or board file), configures the SPI
 * link, allocates the transfer pools, verifies communication by forcing
 * page select to 0, registers the transport with the RMI core, and
 * finally wires up the attention interrupt.
 *
 * Returns zero on success or a negative error code on failure.
 */
static int rmi_spi_probe(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi;
	struct rmi_device_platform_data *pdata;
	struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
	int retval;

	/* The RMI protocol requires a full-duplex controller. */
	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
			GFP_KERNEL);
	if (!rmi_spi)
		return -ENOMEM;

	pdata = &rmi_spi->xport.pdata;

	/* DT properties take precedence; fall back to board platform data. */
	if (spi->dev.of_node) {
		retval = rmi_spi_of_probe(spi, pdata);
		if (retval)
			return retval;
	} else if (spi_pdata) {
		*pdata = *spi_pdata;
	}

	if (pdata->spi_data.bits_per_word)
		spi->bits_per_word = pdata->spi_data.bits_per_word;

	if (pdata->spi_data.mode)
		spi->mode = pdata->spi_data.mode;

	retval = spi_setup(spi);
	if (retval < 0) {
		dev_err(&spi->dev, "spi_setup failed!\n");
		return retval;
	}

	if (spi->irq > 0)
		rmi_spi->irq = spi->irq;

	rmi_spi->spi = spi;
	mutex_init(&rmi_spi->page_mutex);

	rmi_spi->xport.dev = &spi->dev;
	rmi_spi->xport.proto_name = "spi";
	rmi_spi->xport.ops = &rmi_spi_ops;

	spi_set_drvdata(spi, rmi_spi);

	retval = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
	if (retval)
		return retval;

	/*
	 * Setting the page to zero will (a) make sure the PSR is in a
	 * known state, and (b) make sure we can talk to the device.
	 */
	retval = rmi_set_page(rmi_spi, 0);
	if (retval) {
		dev_err(&spi->dev, "Failed to set page select to 0.\n");
		return retval;
	}

	retval = rmi_register_transport_device(&rmi_spi->xport);
	if (retval) {
		dev_err(&spi->dev, "failed to register transport.\n");
		return retval;
	}
	/* Guarantee the transport is unregistered on unbind or failure. */
	retval = devm_add_action_or_reset(&spi->dev,
					rmi_spi_unregister_transport,
					rmi_spi);
	if (retval)
		return retval;

	retval = rmi_spi_init_irq(spi);
	if (retval < 0)
		return retval;

	dev_info(&spi->dev, "registered RMI SPI driver\n");
	return 0;
}
487 | ||
8d99758d AD |
#ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend: quiesce the RMI core, mask the interrupt, and
 * arm the IRQ as a wake source when the device may wake the system.
 */
static int rmi_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	disable_irq(rmi_spi->irq);
	if (device_may_wakeup(&spi->dev)) {
		/* enable_irq_wake() returns 0 on success; warn on failure. */
		ret = enable_irq_wake(rmi_spi->irq);
		if (ret)
			dev_warn(dev, "Failed to enable irq for wake: %d\n",
				ret);
	}
	return ret;
}

/*
 * System-sleep resume: unmask the interrupt, disarm the wake source,
 * and bring the RMI core back up.
 */
static int rmi_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	enable_irq(rmi_spi->irq);
	if (device_may_wakeup(&spi->dev)) {
		/* disable_irq_wake() returns 0 on success; warn on failure. */
		ret = disable_irq_wake(rmi_spi->irq);
		if (ret)
			dev_warn(dev, "Failed to disable irq for wake: %d\n",
				ret);
	}

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return ret;
}
#endif
530 | ||
#ifdef CONFIG_PM
/* Runtime suspend: quiesce the RMI core and mask the interrupt. */
static int rmi_spi_runtime_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	disable_irq(rmi_spi->irq);

	/* Best effort: report success so runtime PM state stays consistent. */
	return 0;
}

/* Runtime resume: unmask the interrupt and wake the RMI core. */
static int rmi_spi_runtime_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	enable_irq(rmi_spi->irq);

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return 0;
}
#endif
562 | ||
/* System-sleep and runtime PM callbacks. */
static const struct dev_pm_ops rmi_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
	SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
			   NULL)
};

/* Legacy (non-devicetree) SPI device ID table. */
static const struct spi_device_id rmi_id[] = {
	{ "rmi4_spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, rmi_id);
574 | ||
/* SPI driver glue; of_match_ptr() compiles out the table without CONFIG_OF. */
static struct spi_driver rmi_spi_driver = {
	.driver = {
		.name	= "rmi4_spi",
		.pm	= &rmi_spi_pm,
		.of_match_table = of_match_ptr(rmi_spi_of_match),
	},
	.id_table	= rmi_id,
	.probe		= rmi_spi_probe,
};

module_spi_driver(rmi_spi_driver);

MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(RMI_DRIVER_VERSION);