/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007,2012 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include "../w1.h"
#include "../w1_int.h"

#define MOD_NAME	"OMAP_HDQ:"

#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_SINGLE		BIT(7)
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	BIT(6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	BIT(5)
#define OMAP_HDQ_CTRL_STATUS_GO			BIT(4)
#define OMAP_HDQ_CTRL_STATUS_PRESENCE		BIT(3)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	BIT(2)
#define OMAP_HDQ_CTRL_STATUS_DIR		BIT(1)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		BIT(2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		BIT(1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		BIT(0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		BIT(1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		BIT(0)
#define OMAP_HDQ_SYSCONFIG_NOIDLE		0x0
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		BIT(0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
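/* timeout for register polling and interrupt waits: HZ/5 jiffies (200 ms) */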
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

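/*
 * Wait queue shared by all transfers; hdq_isr() wakes it when a
 * TX-complete, RX-complete or timeout interrupt arrives.  w1_id is the
 * module parameter (declared at the bottom of this file) reported as the
 * slave id by omap_w1_search_bus() in HDQ mode.
 */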
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
static int w1_id;

struct hdq_data {
	struct device *dev;
	void __iomem *hdq_base;
	/* lock status update */
	struct mutex hdq_mutex;
	int hdq_usecount;
	u8 hdq_irqstatus;
	/* device lock */
	spinlock_t hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int init_trans;
	int rrw;
	/* mode: 0-HDQ 1-W1 */
	int mode;
};

static int omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);

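/*
 * Device tree match table.  The optional "ti,mode" property is parsed in
 * omap_hdq_probe(): "hdq" (or a missing property) selects HDQ mode, any
 * other value selects 1-Wire mode.
 */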
static const struct of_device_id omap_hdq_dt_ids[] = {
	{ .compatible = "ti,omap3-1w" },
	{ .compatible = "ti,am4372-hdq" },
	{}
};
MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);

static struct platform_driver omap_hdq_driver = {
	.probe = omap_hdq_probe,
	.remove = omap_hdq_remove,
	.driver = {
		.name = "omap_hdq",
		.of_match_table = omap_hdq_dt_ids,
	},
};

static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);

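/*
 * w1 bus master operations registered with the w1 core.  The .search
 * (HDQ mode) or .triplet (1-Wire mode) callback is filled in at probe
 * time, depending on the selected mode.
 */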
static struct w1_bus_master omap_w1_master = {
	.read_byte = omap_w1_read_byte,
	.write_byte = omap_w1_write_byte,
	.reset_bus = omap_w1_reset_bus,
};

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readl(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writel(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
			u8 val, u8 mask)
{
	u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writel(new_val, hdq_data->hdq_base + offset);

	return new_val;
}

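/*
 * Clear the bits of register @offset that are zero in @mask (the caller
 * passes ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK to disable the HDQ interrupt).
 */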
static void hdq_disable_interrupt(struct hdq_data *hdq_data, u32 offset,
				  u32 mask)
{
	u32 ie;

	ie = readl(hdq_data->hdq_base + offset);
	writel(ie & mask, hdq_data->hdq_base + offset);
}

/*
 * Wait for one or more bits in the flag to change.
 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * Return 0 on success and -ETIMEDOUT in the case of timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
		u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag to clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag to be set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}

/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TXCOMPLETE, %x",
			*status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit to return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev,
			"timeout waiting for GO bit to return to zero, %x",
			tmp_status);
	}

out:
	return ret;
}

/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

/* W1 search callback function in HDQ mode */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * HDQ might not truly obey the 1-wire spec,
	 * so calculate the CRC based on the module parameter.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}

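/*
 * Soft-reset the controller, wait for RESETDONE, then re-select HDQ/1-Wire
 * mode, re-enable the clock and interrupt mask, and return the module to
 * autoidle.
 */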
static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
		    OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ/1W mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and
	 * GO/INIT won't return to zero if the interrupt is disabled, so we
	 * always enable the interrupt.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
			tmp_status);
	else {
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
			    hdq_data->mode);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			    OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}

/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
			tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * Check that the presence-detect bit is set, which shows that
	 * a slave is responding.
	 */
	if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
		OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
		dev_dbg(hdq_data->dev, "Presence bit not set\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * Wait for both the INIT and GO bits to return to zero.
	 * Zero wait time is expected in interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
			OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
			&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev,
			"timeout waiting for INIT and GO bits to return to zero, %x",
			tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

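/*
 * Read one byte from the bus.  If the RX-complete interrupt has not already
 * fired (back-to-back write/read), start a read cycle by setting DIR|GO and
 * wait for RXCOMPLETE before fetching the byte from the RX_DATA register.
 */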
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 */
		wait_event_timeout(hdq_wait_queue,
				   (hdq_data->hdq_irqstatus
				    & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				   OMAP_HDQ_TIMEOUT);

		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for RXCOMPLETE, %x",
				status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Enable clocks and set the controller to HDQ/1W mode */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {

			pm_runtime_get_sync(hdq_data->dev);

			/* make sure HDQ/1W is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back up the count */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ/1W mode & enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
					hdq_data->mode);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					OMAP_HDQ_SYSCONFIG_NOIDLE);
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Disable clocks to the module */
static int omap_hdq_put(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0)
		return -EINTR;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
		    OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	if (0 == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev,
			"attempt to decrement use count when it is zero");
		ret = -EINVAL;
	} else {
		hdq_data->hdq_usecount--;
		module_put(THIS_MODULE);
		if (0 == hdq_data->hdq_usecount)
			pm_runtime_put_sync(hdq_data->dev);
	}
	mutex_unlock(&hdq_data->hdq_mutex);

	return ret;
}

/*
 * W1 triplet callback function - used for searching ROM addresses.
 * Registered only when controller is in 1-wire mode.
 */
static u8 omap_w1_triplet(void *_hdq, u8 bdir)
{
	u8 id_bit, comp_bit;
	int err;
	u8 ret = 0x3; /* no slaves responded */
	struct hdq_data *hdq_data = _hdq;
	u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
		  OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
	u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;

	omap_hdq_get(_hdq);

	err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (err < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		goto rtn;
	}

	hdq_data->hdq_irqstatus = 0;
	/* read id_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	hdq_data->hdq_irqstatus = 0;
	/* read comp_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	if (id_bit && comp_bit) {
		ret = 0x03; /* no slaves responded */
		goto out;
	}
	if (!id_bit && !comp_bit) {
		/* Both bits are valid, take the direction given */
		ret = bdir ? 0x04 : 0;
	} else {
		/* Only one bit is valid, take that direction */
		bdir = id_bit;
		ret = id_bit ? 0x05 : 0x02;
	}

	/* write bdir bit */
	hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		goto out;
	}

	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
		      OMAP_HDQ_CTRL_STATUS_SINGLE);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	omap_hdq_put(_hdq);
	return ret;
}

/* reset callback */
static u8 omap_w1_reset_bus(void *_hdq)
{
	omap_hdq_get(_hdq);
	omap_hdq_break(_hdq);
	omap_hdq_put(_hdq);
	return 0;
}

/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = hdq_read_byte(hdq_data, &val);
	if (ret) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
		return -1;
	}

	hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
			      ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* Write followed by a read, release the module */
	if (hdq_data->init_trans) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
	}

	return val;
}

/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	/*
	 * We need to reset the slave before
	 * issuing the SKIP ROM command, else
	 * the slave will not work.
	 */
	if (byte == W1_SKIP_ROM)
		omap_hdq_break(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure: Ctrl status %x\n", status);
		return;
	}

	/* Second write, data transferred. Release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}
}

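/*
 * Probe: map the registers, enable runtime PM, reset the controller,
 * request the interrupt, pick HDQ or 1-Wire mode from the "ti,mode"
 * device tree property, and register a w1 bus master.
 */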
static int omap_hdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct hdq_data *hdq_data;
	struct resource *res;
	int ret, irq;
	u8 rev;
	const char *mode;

	hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		return -ENOMEM;
	}

	hdq_data->dev = dev;
	platform_set_drvdata(pdev, hdq_data);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdq_data->hdq_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdq_data->hdq_base))
		return PTR_ERR(hdq_data->hdq_base);

	hdq_data->hdq_usecount = 0;
	hdq_data->rrw = 0;
	mutex_init(&hdq_data->hdq_mutex);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
		goto err_w1;
	}

	ret = _omap_hdq_reset(hdq_data);
	if (ret) {
		dev_dbg(&pdev->dev, "reset failed\n");
		goto err_irq;
	}

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto err_irq;
	}

	ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	omap_hdq_break(hdq_data);

	pm_runtime_put_sync(&pdev->dev);

	ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
	if (ret < 0 || !strcmp(mode, "hdq")) {
		hdq_data->mode = 0;
		omap_w1_master.search = omap_w1_search_bus;
	} else {
		hdq_data->mode = 1;
		omap_w1_master.triplet = omap_w1_triplet;
	}

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_irq:
	pm_runtime_put_sync(&pdev->dev);
err_w1:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

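/*
 * Refuse removal while the controller is still in use, then drop the
 * runtime PM enable taken in probe.
 */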
static int omap_hdq_remove(struct platform_device *pdev)
{
	struct hdq_data *hdq_data = platform_get_drvdata(pdev);

	mutex_lock(&hdq_data->hdq_mutex);

	if (hdq_data->hdq_usecount) {
		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
		mutex_unlock(&hdq_data->hdq_mutex);
		return -EBUSY;
	}

	mutex_unlock(&hdq_data->hdq_mutex);

	/* remove module dependency */
	pm_runtime_disable(&pdev->dev);

	return 0;
}

module_platform_driver(omap_hdq_driver);

module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ-1W driver Library");
MODULE_LICENSE("GPL");