/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007,2012 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/w1.h>

#define MOD_NAME "OMAP_HDQ:"

#define OMAP_HDQ_REVISION                       0x00
#define OMAP_HDQ_TX_DATA                        0x04
#define OMAP_HDQ_RX_DATA                        0x08
#define OMAP_HDQ_CTRL_STATUS                    0x0c
#define OMAP_HDQ_CTRL_STATUS_SINGLE             BIT(7)
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK      BIT(6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE        BIT(5)
#define OMAP_HDQ_CTRL_STATUS_GO                 BIT(4)
#define OMAP_HDQ_CTRL_STATUS_PRESENCE           BIT(3)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION     BIT(2)
#define OMAP_HDQ_CTRL_STATUS_DIR                BIT(1)
#define OMAP_HDQ_INT_STATUS                     0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE          BIT(2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE          BIT(1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT             BIT(0)
#define OMAP_HDQ_SYSCONFIG                      0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET            BIT(1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE             BIT(0)
#define OMAP_HDQ_SYSCONFIG_NOIDLE               0x0
#define OMAP_HDQ_SYSSTATUS                      0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE            BIT(0)

#define OMAP_HDQ_FLAG_CLEAR                     0
#define OMAP_HDQ_FLAG_SET                       1
#define OMAP_HDQ_TIMEOUT                        (HZ/5)

#define OMAP_HDQ_MAX_USER                       4

static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);

static int w1_id;
module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");

struct hdq_data {
        struct device *dev;
        void __iomem *hdq_base;
        /* lock status update */
        struct mutex hdq_mutex;
        int hdq_usecount;
        u8 hdq_irqstatus;
        /* device lock */
        spinlock_t hdq_spinlock;
        /*
         * Used to control the call to omap_hdq_get and omap_hdq_put.
         * HDQ Protocol: Write the CMD|REG_address first, followed by
         * the data write or read.
         */
        int init_trans;
        int rrw;
        /* mode: 0-HDQ 1-W1 */
        int mode;

};

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
        return __raw_readl(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
        __raw_writel(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
                u8 val, u8 mask)
{
        u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
                | (val & mask);
        __raw_writel(new_val, hdq_data->hdq_base + offset);

        return new_val;
}

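/*
 * Clear the register bits that are zero in @mask: the register is read
 * back and only the bits set in @mask survive the write.  The single
 * caller passes ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK, which drops the
 * interrupt-enable bit in CTRL_STATUS once a read has completed.
 */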
static void hdq_disable_interrupt(struct hdq_data *hdq_data, u32 offset,
                u32 mask)
{
        u32 ie;

        ie = readl(hdq_data->hdq_base + offset);
        writel(ie & mask, hdq_data->hdq_base + offset);
}

/*
 * Wait for one or more bits in the flag to change.
 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * Return 0 on success and -ETIMEDOUT in the case of timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
                u8 flag, u8 flag_set, u8 *status)
{
        int ret = 0;
        unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

        if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
                /* wait for the flag clear */
                while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
                        && time_before(jiffies, timeout)) {
                        schedule_timeout_uninterruptible(1);
                }
                if (*status & flag)
                        ret = -ETIMEDOUT;
        } else if (flag_set == OMAP_HDQ_FLAG_SET) {
                /* wait for the flag set */
                while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
                        && time_before(jiffies, timeout)) {
                        schedule_timeout_uninterruptible(1);
                }
                if (!(*status & flag))
                        ret = -ETIMEDOUT;
        } else
                return -EINVAL;

        return ret;
}

/* write out a byte and fill *status with HDQ_INT_STATUS */
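/*
 * Transmit sequence: clear the cached interrupt status, load TX_DATA,
 * then set GO with DIR cleared (write direction) and sleep until the
 * ISR reports an interrupt (expected to be TXCOMPLETE).  Finally wait
 * for the GO bit itself to drop back to zero.
 */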
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
        int ret;
        u8 tmp_status;
        unsigned long irqflags;

        *status = 0;

        spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
        /* clear interrupt flags via a dummy read */
        hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
        /* ISR loads it with new INT_STATUS */
        hdq_data->hdq_irqstatus = 0;
        spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

        hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

        /* set the GO bit */
        hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
                OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
        /* wait for the TXCOMPLETE bit */
        ret = wait_event_timeout(hdq_wait_queue,
                hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
        if (ret == 0) {
                dev_dbg(hdq_data->dev, "TX wait elapsed\n");
                ret = -ETIMEDOUT;
                goto out;
        }

        *status = hdq_data->hdq_irqstatus;
        /* check irqstatus */
        if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
                dev_dbg(hdq_data->dev, "timeout waiting for"
                        " TXCOMPLETE/RXCOMPLETE, %x", *status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* wait for the GO bit to return to zero */
        ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
                OMAP_HDQ_CTRL_STATUS_GO,
                OMAP_HDQ_FLAG_CLEAR, &tmp_status);
        if (ret) {
                dev_dbg(hdq_data->dev, "timeout waiting GO bit"
                        " return to zero, %x", tmp_status);
        }

out:
        return ret;
}

/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
        struct hdq_data *hdq_data = _hdq;
        unsigned long irqflags;

        spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
        hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
        spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
        dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

        if (hdq_data->hdq_irqstatus &
                (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
                | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
                /* wake up sleeping process */
                wake_up(&hdq_wait_queue);
        }

        return IRQ_HANDLED;
}

/* W1 search callback function in HDQ mode */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
                u8 search_type, w1_slave_found_callback slave_found)
{
        u64 module_id, rn_le, cs, id;

        if (w1_id)
                module_id = w1_id;
        else
                module_id = 0x1;

        rn_le = cpu_to_le64(module_id);
        /*
         * HDQ might not truly obey the 1-wire spec.
         * So calculate CRC based on module parameter.
         */
        cs = w1_calc_crc8((u8 *)&rn_le, 7);
        id = (cs << 56) | module_id;

        slave_found(master_dev, id);
}

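/*
 * Soft-reset the module.  CLOCKENABLE and INTERRUPTMASK are raised
 * immediately (see the comment below on why the interrupt must stay
 * enabled); once RESETDONE is observed, the configured HDQ/1-wire mode
 * is selected and SYSCONFIG is switched back to autoidle.
 */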
static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
        int ret;
        u8 tmp_status;

        hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
                OMAP_HDQ_SYSCONFIG_SOFTRESET);
        /*
         * Select HDQ/1W mode & enable clocks.
         * It is observed that INT flags can't be cleared via a read and GO/INIT
         * won't return to zero if interrupt is disabled. So we always enable
         * interrupt.
         */
        hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
                OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
                OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

        /* wait for reset to complete */
        ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
                OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
        if (ret)
                dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
                        tmp_status);
        else {
                hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
                        OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
                        OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
                        hdq_data->mode);
                hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
                        OMAP_HDQ_SYSCONFIG_AUTOIDLE);
        }

        return ret;
}

/* Issue break pulse to the device */
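/*
 * Break/reset sequence: set INIT together with GO, sleep until the ISR
 * reports an interrupt (the TIMEOUT status bit is what signals
 * completion here), check the PRESENCE bit in CTRL_STATUS to see
 * whether a slave answered, then wait for INIT and GO to clear again.
 */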
static int omap_hdq_break(struct hdq_data *hdq_data)
{
        int ret = 0;
        u8 tmp_status;
        unsigned long irqflags;

        ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (ret < 0) {
                dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
                ret = -EINTR;
                goto rtn;
        }

        spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
        /* clear interrupt flags via a dummy read */
        hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
        /* ISR loads it with new INT_STATUS */
        hdq_data->hdq_irqstatus = 0;
        spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

        /* set the INIT and GO bit */
        hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
                OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
                OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
                OMAP_HDQ_CTRL_STATUS_GO);

        /* wait for the TIMEOUT bit */
        ret = wait_event_timeout(hdq_wait_queue,
                hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
        if (ret == 0) {
                dev_dbg(hdq_data->dev, "break wait elapsed\n");
                ret = -EINTR;
                goto out;
        }

        tmp_status = hdq_data->hdq_irqstatus;
        /* check irqstatus */
        if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
                dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
                        tmp_status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /*
         * check that the presence-detect bit gets set,
         * showing that the slave is responding
         */
        if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
                OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
                dev_dbg(hdq_data->dev, "Presence bit not set\n");
                ret = -ETIMEDOUT;
                goto out;
        }

        /*
         * wait for both INIT and GO bits to return to zero.
         * zero wait time expected for interrupt mode.
         */
        ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
                OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
                OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
                &tmp_status);
        if (ret)
                dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
                        " return to zero, %x", tmp_status);

out:
        mutex_unlock(&hdq_data->hdq_mutex);
rtn:
        return ret;
}

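/*
 * Read one byte from the bus.  If the ISR has not already reported
 * RXCOMPLETE, a read is started by setting DIR together with GO and the
 * caller sleeps until RXCOMPLETE arrives; the byte is then taken from
 * RX_DATA.  Requires the module to be enabled (hdq_usecount != 0).
 */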
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
        int ret = 0;
        u8 status;

        ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (ret < 0) {
                ret = -EINTR;
                goto rtn;
        }

        if (!hdq_data->hdq_usecount) {
                ret = -EINVAL;
                goto out;
        }

        if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
                hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
                        OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
                        OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
                /*
                 * The RX comes immediately after TX.
                 */
                wait_event_timeout(hdq_wait_queue,
                        (hdq_data->hdq_irqstatus
                        & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
                        OMAP_HDQ_TIMEOUT);

                hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
                        OMAP_HDQ_CTRL_STATUS_DIR);
                status = hdq_data->hdq_irqstatus;
                /* check irqstatus */
                if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
                        dev_dbg(hdq_data->dev, "timeout waiting for"
                                " RXCOMPLETE, %x", status);
                        ret = -ETIMEDOUT;
                        goto out;
                }
        }
        /* the data is ready. Read it in! */
        *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
        mutex_unlock(&hdq_data->hdq_mutex);
rtn:
        return ret;

}

/* Enable clocks and set the controller to HDQ/1W mode */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
        int ret = 0;

        ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (ret < 0) {
                ret = -EINTR;
                goto rtn;
        }

        if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
                dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
                ret = -EINVAL;
                goto out;
        } else {
                hdq_data->hdq_usecount++;
                try_module_get(THIS_MODULE);
                if (1 == hdq_data->hdq_usecount) {

                        pm_runtime_get_sync(hdq_data->dev);

                        /* make sure HDQ/1W is out of reset */
                        if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
                                OMAP_HDQ_SYSSTATUS_RESETDONE)) {
                                ret = _omap_hdq_reset(hdq_data);
                                if (ret)
                                        /* back up the count */
                                        hdq_data->hdq_usecount--;
                        } else {
                                /* select HDQ/1W mode & enable clocks */
                                hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
                                        OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
                                        OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
                                        hdq_data->mode);
                                hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
                                        OMAP_HDQ_SYSCONFIG_NOIDLE);
                                hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
                        }
                }
        }

out:
        mutex_unlock(&hdq_data->hdq_mutex);
rtn:
        return ret;
}

/* Disable clocks to the module */
static int omap_hdq_put(struct hdq_data *hdq_data)
{
        int ret = 0;

        ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (ret < 0)
                return -EINTR;

        hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
                OMAP_HDQ_SYSCONFIG_AUTOIDLE);
        if (0 == hdq_data->hdq_usecount) {
                dev_dbg(hdq_data->dev, "attempt to decrement use count"
                        " when it is zero");
                ret = -EINVAL;
        } else {
                hdq_data->hdq_usecount--;
                module_put(THIS_MODULE);
                if (0 == hdq_data->hdq_usecount)
                        pm_runtime_put_sync(hdq_data->dev);
        }
        mutex_unlock(&hdq_data->hdq_mutex);

        return ret;
}

/*
 * W1 triplet callback function - used for searching ROM addresses.
 * Registered only when controller is in 1-wire mode.
 */
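/*
 * The return value mirrors the generic w1 triplet encoding used below:
 * 0x03 means no slave responded, otherwise bit 2 reports the direction
 * that was written and the low bits reflect the id/complement bits that
 * were read.
 */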
static u8 omap_w1_triplet(void *_hdq, u8 bdir)
{
        u8 id_bit, comp_bit;
        int err;
        u8 ret = 0x3; /* no slaves responded */
        struct hdq_data *hdq_data = _hdq;
        u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
                OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
        u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;

        omap_hdq_get(_hdq);

        err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (err < 0) {
                dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
                goto rtn;
        }

        hdq_data->hdq_irqstatus = 0;
        /* read id_bit */
        hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
                ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
        err = wait_event_timeout(hdq_wait_queue,
                (hdq_data->hdq_irqstatus
                & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
                OMAP_HDQ_TIMEOUT);
        if (err == 0) {
                dev_dbg(hdq_data->dev, "RX wait elapsed\n");
                goto out;
        }
        id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

        hdq_data->hdq_irqstatus = 0;
        /* read comp_bit */
        hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
                ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
        err = wait_event_timeout(hdq_wait_queue,
                (hdq_data->hdq_irqstatus
                & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
                OMAP_HDQ_TIMEOUT);
        if (err == 0) {
                dev_dbg(hdq_data->dev, "RX wait elapsed\n");
                goto out;
        }
        comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

        if (id_bit && comp_bit) {
                ret = 0x03; /* no slaves responded */
                goto out;
        }
        if (!id_bit && !comp_bit) {
                /* Both bits are valid, take the direction given */
                ret = bdir ? 0x04 : 0;
        } else {
                /* Only one bit is valid, take that direction */
                bdir = id_bit;
                ret = id_bit ? 0x05 : 0x02;
        }

        /* write bdir bit */
        hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
        hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
        err = wait_event_timeout(hdq_wait_queue,
                (hdq_data->hdq_irqstatus
                & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
                OMAP_HDQ_TIMEOUT);
        if (err == 0) {
                dev_dbg(hdq_data->dev, "TX wait elapsed\n");
                goto out;
        }

        hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
                OMAP_HDQ_CTRL_STATUS_SINGLE);

out:
        mutex_unlock(&hdq_data->hdq_mutex);
rtn:
        omap_hdq_put(_hdq);
        return ret;
}

/* reset callback */
static u8 omap_w1_reset_bus(void *_hdq)
{
        omap_hdq_get(_hdq);
        omap_hdq_break(_hdq);
        omap_hdq_put(_hdq);
        return 0;
}

/* Read a byte of data from the device */
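/*
 * init_trans tracks the command/data pairing described in struct
 * hdq_data: a preceding omap_w1_write_byte() takes a reference via
 * omap_hdq_get() and bumps init_trans, and the read that completes the
 * transaction (or an error) drops the reference again here.
 */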
static u8 omap_w1_read_byte(void *_hdq)
{
        struct hdq_data *hdq_data = _hdq;
        u8 val = 0;
        int ret;

        /* First write to initialize the transfer */
        if (hdq_data->init_trans == 0)
                omap_hdq_get(hdq_data);

        ret = hdq_read_byte(hdq_data, &val);
        if (ret) {
                ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
                if (ret < 0) {
                        dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
                        return -EINTR;
                }
                hdq_data->init_trans = 0;
                mutex_unlock(&hdq_data->hdq_mutex);
                omap_hdq_put(hdq_data);
                return -1;
        }

        hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
                ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

        /* Write followed by a read, release the module */
        if (hdq_data->init_trans) {
                ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
                if (ret < 0) {
                        dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
                        return -EINTR;
                }
                hdq_data->init_trans = 0;
                mutex_unlock(&hdq_data->hdq_mutex);
                omap_hdq_put(hdq_data);
        }

        return val;
}

/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
        struct hdq_data *hdq_data = _hdq;
        int ret;
        u8 status;

        /* First write to initialize the transfer */
        if (hdq_data->init_trans == 0)
                omap_hdq_get(hdq_data);

        /*
         * We need to reset the slave before
         * issuing the SKIP ROM command, else
         * the slave will not work.
         */
        if (byte == W1_SKIP_ROM)
                omap_hdq_break(hdq_data);

        ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
        if (ret < 0) {
                dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
                return;
        }
        hdq_data->init_trans++;
        mutex_unlock(&hdq_data->hdq_mutex);

        ret = hdq_write_byte(hdq_data, byte, &status);
        if (ret < 0) {
                dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
                return;
        }

        /* Second write, data transferred. Release the module */
        if (hdq_data->init_trans > 1) {
                omap_hdq_put(hdq_data);
                ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
                if (ret < 0) {
                        dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
                        return;
                }
                hdq_data->init_trans = 0;
                mutex_unlock(&hdq_data->hdq_mutex);
        }
}

static struct w1_bus_master omap_w1_master = {
        .read_byte = omap_w1_read_byte,
        .write_byte = omap_w1_write_byte,
        .reset_bus = omap_w1_reset_bus,
};

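/*
 * Probe: map the register window, enable runtime PM, reset the block,
 * hook up the interrupt and issue an initial break pulse.  The "ti,mode"
 * DT property then selects between HDQ mode (search callback reporting a
 * fixed id) and 1-wire mode (triplet callback) before the w1 master is
 * registered.
 */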
static int omap_hdq_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct hdq_data *hdq_data;
        struct resource *res;
        int ret, irq;
        u8 rev;
        const char *mode;

        hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
        if (!hdq_data) {
                dev_dbg(&pdev->dev, "unable to allocate memory\n");
                return -ENOMEM;
        }

        hdq_data->dev = dev;
        platform_set_drvdata(pdev, hdq_data);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        hdq_data->hdq_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(hdq_data->hdq_base))
                return PTR_ERR(hdq_data->hdq_base);

        hdq_data->hdq_usecount = 0;
        hdq_data->rrw = 0;
        mutex_init(&hdq_data->hdq_mutex);

        pm_runtime_enable(&pdev->dev);
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0) {
                dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
                goto err_w1;
        }

        ret = _omap_hdq_reset(hdq_data);
        if (ret) {
                dev_dbg(&pdev->dev, "reset failed\n");
                goto err_irq;
        }

        rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
        dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
                (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

        spin_lock_init(&hdq_data->hdq_spinlock);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = -ENXIO;
                goto err_irq;
        }

        ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
        if (ret < 0) {
                dev_dbg(&pdev->dev, "could not request irq\n");
                goto err_irq;
        }

        omap_hdq_break(hdq_data);

        pm_runtime_put_sync(&pdev->dev);

        ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
        if (ret < 0 || !strcmp(mode, "hdq")) {
                hdq_data->mode = 0;
                omap_w1_master.search = omap_w1_search_bus;
        } else {
                hdq_data->mode = 1;
                omap_w1_master.triplet = omap_w1_triplet;
        }

        omap_w1_master.data = hdq_data;

        ret = w1_add_master_device(&omap_w1_master);
        if (ret) {
                dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
                goto err_w1;
        }

        return 0;

err_irq:
        pm_runtime_put_sync(&pdev->dev);
err_w1:
        pm_runtime_disable(&pdev->dev);

        return ret;
}

static int omap_hdq_remove(struct platform_device *pdev)
{
        struct hdq_data *hdq_data = platform_get_drvdata(pdev);

        mutex_lock(&hdq_data->hdq_mutex);

        if (hdq_data->hdq_usecount) {
                dev_dbg(&pdev->dev, "removed when use count is not zero\n");
                mutex_unlock(&hdq_data->hdq_mutex);
                return -EBUSY;
        }

        mutex_unlock(&hdq_data->hdq_mutex);

        /* remove module dependency */
        pm_runtime_disable(&pdev->dev);

        return 0;
}

static const struct of_device_id omap_hdq_dt_ids[] = {
        { .compatible = "ti,omap3-1w" },
        { .compatible = "ti,am4372-hdq" },
        {}
};
MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);

static struct platform_driver omap_hdq_driver = {
        .probe = omap_hdq_probe,
        .remove = omap_hdq_remove,
        .driver = {
                .name = "omap_hdq",
                .of_match_table = omap_hdq_dt_ids,
        },
};
module_platform_driver(omap_hdq_driver);

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ-1W driver Library");
MODULE_LICENSE("GPL");