/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007,2012 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/w1.h>

#define MOD_NAME "OMAP_HDQ:"

#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_SINGLE		BIT(7)
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	BIT(6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	BIT(5)
#define OMAP_HDQ_CTRL_STATUS_GO			BIT(4)
#define OMAP_HDQ_CTRL_STATUS_PRESENCE		BIT(3)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	BIT(2)
#define OMAP_HDQ_CTRL_STATUS_DIR		BIT(1)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		BIT(2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		BIT(1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		BIT(0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		BIT(1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		BIT(0)
#define OMAP_HDQ_SYSCONFIG_NOIDLE		0x0
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		BIT(0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);

static int w1_id;
module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");
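
/*
 * Illustrative only (not part of the original source): assuming the
 * driver is built as a module named omap_hdq, the fabricated slave id
 * can be overridden at load time, e.g. "modprobe omap_hdq w1_id=2".
 * The value is only consulted by the HDQ-mode search callback,
 * omap_w1_search_bus().
 */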

struct hdq_data {
	struct device *dev;
	void __iomem *hdq_base;
	/* lock status update */
	struct mutex hdq_mutex;
	int hdq_usecount;
	u8 hdq_irqstatus;
	/* device lock */
	spinlock_t hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int init_trans;
	int rrw;
	/* mode: 0-HDQ 1-W1 */
	int mode;
};
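
/*
 * Locking summary: hdq_mutex protects hdq_usecount, init_trans and the
 * controller configuration sequences; hdq_spinlock guards hdq_irqstatus,
 * which is also written by the interrupt handler. Callers sleep on
 * hdq_wait_queue until the ISR records a completion or timeout event in
 * hdq_irqstatus.
 */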

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readl(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writel(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
		u8 val, u8 mask)
{
	u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writel(new_val, hdq_data->hdq_base + offset);

	return new_val;
}

static void hdq_disable_interrupt(struct hdq_data *hdq_data, u32 offset,
				  u32 mask)
{
	u32 ie;

	ie = readl(hdq_data->hdq_base + offset);
	writel(ie & mask, hdq_data->hdq_base + offset);
}

/*
 * Wait for one or more bits in the flag to change.
 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * return 0 on success and -ETIMEDOUT in the case of timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
		u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}

/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for"
			" TXCOMPLETE/RXCOMPLETE, %x", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
			" return to zero, %x", tmp_status);
	}

out:
	return ret;
}

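/*
 * Reading OMAP_HDQ_INT_STATUS clears the interrupt flags, so the handler
 * performs the single read and caches the value in hdq_irqstatus for the
 * threads sleeping on hdq_wait_queue.
 */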
/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

/* W1 search callback function in HDQ mode */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * HDQ might not truly obey the 1-wire spec.
	 * So calculate CRC based on module parameter.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}

static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
		    OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ/1W mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and GO/INIT
	 * won't return to zero if interrupt is disabled. So we always enable
	 * interrupt.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
			tmp_status);
	else {
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
			hdq_data->mode);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}

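/*
 * A break (initialization) pulse is generated by setting the INIT and GO
 * bits together; completion is signalled by the TIMEOUT interrupt, after
 * which the PRESENCE bit indicates whether a slave answered the pulse.
 */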
/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
			tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * check for the presence detect bit to get
	 * set to show that the slave is responding
	 */
	if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
		OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
		dev_dbg(hdq_data->dev, "Presence bit not set\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * wait for both INIT and GO bits to return to zero.
	 * zero wait time expected for interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
		&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
			" return to zero, %x", tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 */
		wait_event_timeout(hdq_wait_queue,
				   (hdq_data->hdq_irqstatus
				    & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				   OMAP_HDQ_TIMEOUT);

		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for"
				" RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Enable clocks and set the controller to HDQ/1W mode */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {

			pm_runtime_get_sync(hdq_data->dev);

			/* make sure HDQ/1W is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back out the increment */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ/1W mode & enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
					hdq_data->mode);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					OMAP_HDQ_SYSCONFIG_NOIDLE);
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Disable clocks to the module */
static int omap_hdq_put(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0)
		return -EINTR;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
		    OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	if (0 == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to decrement use count"
			" when it is zero");
		ret = -EINVAL;
	} else {
		hdq_data->hdq_usecount--;
		module_put(THIS_MODULE);
		if (0 == hdq_data->hdq_usecount)
			pm_runtime_put_sync(hdq_data->dev);
	}
	mutex_unlock(&hdq_data->hdq_mutex);

	return ret;
}
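
/*
 * omap_hdq_get() and omap_hdq_put() reference-count users of the
 * controller: the first get powers the block via runtime PM and
 * (re)configures clocks and mode, and the last put drops the runtime PM
 * reference again. The w1 callbacks below call them around each
 * transaction.
 */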

/*
 * W1 triplet callback function - used for searching ROM addresses.
 * Registered only when controller is in 1-wire mode.
 */
static u8 omap_w1_triplet(void *_hdq, u8 bdir)
{
	u8 id_bit, comp_bit;
	int err;
	u8 ret = 0x3; /* no slaves responded */
	struct hdq_data *hdq_data = _hdq;
	u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
		  OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
	u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;

	omap_hdq_get(_hdq);

	err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (err < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		goto rtn;
	}

	hdq_data->hdq_irqstatus = 0;
	/* read id_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	hdq_data->hdq_irqstatus = 0;
	/* read comp_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	if (id_bit && comp_bit) {
		ret = 0x03; /* no slaves responded */
		goto out;
	}
	if (!id_bit && !comp_bit) {
		/* Both bits are valid, take the direction given */
		ret = bdir ? 0x04 : 0;
	} else {
		/* Only one bit is valid, take that direction */
		bdir = id_bit;
		ret = id_bit ? 0x05 : 0x02;
	}

	/* write bdir bit */
	hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		goto out;
	}

	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
		      OMAP_HDQ_CTRL_STATUS_SINGLE);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	omap_hdq_put(_hdq);
	return ret;
}
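
/*
 * The return codes above (0x00-0x05) pack the sampled id bit, its
 * complement and the direction bit that was written, mirroring the
 * values produced by the w1 core's default w1_triplet() implementation;
 * 0x03 means no slave responded.
 */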

/* reset callback */
static u8 omap_w1_reset_bus(void *_hdq)
{
	omap_hdq_get(_hdq);
	omap_hdq_break(_hdq);
	omap_hdq_put(_hdq);
	return 0;
}

/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = hdq_read_byte(hdq_data, &val);
	if (ret) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
		return -1;
	}

	hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
			      ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* Write followed by a read, release the module */
	if (hdq_data->init_trans) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
	}

	return val;
}

/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	/*
	 * We need to reset the slave before
	 * issuing the SKIP ROM command, else
	 * the slave will not work.
	 */
	if (byte == W1_SKIP_ROM)
		omap_hdq_break(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
		return;
	}

	/* Second write, data transferred. Release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}
}
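
/*
 * An HDQ transaction is a command/register-address write followed by a
 * data write or data read. The first write_byte of a transaction grabs
 * the controller via omap_hdq_get(); init_trans tracks progress so that
 * the read_byte or second write_byte completing the transaction releases
 * the controller again with omap_hdq_put().
 */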

static struct w1_bus_master omap_w1_master = {
	.read_byte = omap_w1_read_byte,
	.write_byte = omap_w1_write_byte,
	.reset_bus = omap_w1_reset_bus,
};

static int omap_hdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct hdq_data *hdq_data;
	struct resource *res;
	int ret, irq;
	u8 rev;
	const char *mode;

	hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		return -ENOMEM;
	}

	hdq_data->dev = dev;
	platform_set_drvdata(pdev, hdq_data);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdq_data->hdq_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdq_data->hdq_base))
		return PTR_ERR(hdq_data->hdq_base);

	hdq_data->hdq_usecount = 0;
	hdq_data->rrw = 0;
	mutex_init(&hdq_data->hdq_mutex);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
		goto err_w1;
	}

	ret = _omap_hdq_reset(hdq_data);
	if (ret) {
		dev_dbg(&pdev->dev, "reset failed\n");
		goto err_irq;
	}

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_dbg(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		ret = irq;
		goto err_irq;
	}

	ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	omap_hdq_break(hdq_data);

	pm_runtime_put_sync(&pdev->dev);

	ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
	if (ret < 0 || !strcmp(mode, "hdq")) {
		hdq_data->mode = 0;
		omap_w1_master.search = omap_w1_search_bus;
	} else {
		hdq_data->mode = 1;
		omap_w1_master.triplet = omap_w1_triplet;
	}

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_irq:
	pm_runtime_put_sync(&pdev->dev);
err_w1:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

static int omap_hdq_remove(struct platform_device *pdev)
{
	struct hdq_data *hdq_data = platform_get_drvdata(pdev);

	mutex_lock(&hdq_data->hdq_mutex);

	if (hdq_data->hdq_usecount) {
		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
		mutex_unlock(&hdq_data->hdq_mutex);
		return -EBUSY;
	}

	mutex_unlock(&hdq_data->hdq_mutex);

	/* remove module dependency */
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static const struct of_device_id omap_hdq_dt_ids[] = {
	{ .compatible = "ti,omap3-1w" },
	{ .compatible = "ti,am4372-hdq" },
	{}
};
MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);
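
/*
 * Illustrative devicetree usage (a sketch; the node name, address and
 * interrupt below are placeholders, not taken from this file):
 *
 *	hdqw1w@480b2000 {
 *		compatible = "ti,omap3-1w";
 *		reg = <0x480b2000 0x1000>;
 *		interrupts = <58>;
 *		ti,mode = "1w";
 *	};
 *
 * Leaving out "ti,mode" or setting it to "hdq" selects HDQ mode; any
 * other value selects 1-wire mode, as handled in omap_hdq_probe() above.
 */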

static struct platform_driver omap_hdq_driver = {
	.probe = omap_hdq_probe,
	.remove = omap_hdq_remove,
	.driver = {
		.name = "omap_hdq",
		.of_match_table = omap_hdq_dt_ids,
	},
};
module_platform_driver(omap_hdq_driver);

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ-1W driver library");
MODULE_LICENSE("GPL");