/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007,2012 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>

#include <asm/irq.h>

#include "../w1.h"
#include "../w1_int.h"

#define MOD_NAME	"OMAP_HDQ:"

#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	(1<<6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	(1<<5)
#define OMAP_HDQ_CTRL_STATUS_GO			(1<<4)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	(1<<2)
#define OMAP_HDQ_CTRL_STATUS_DIR		(1<<1)
#define OMAP_HDQ_CTRL_STATUS_MODE		(1<<0)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		(1<<2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		(1<<1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		(1<<0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		(1<<1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		(1<<0)
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		(1<<0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

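/*
 * Module-scope state: hdq_wait_queue is woken from the interrupt handler
 * once an interrupt status bit has been latched, and the w1_id module
 * parameter seeds the slave ID reported by the search callback below.
 */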
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
static int w1_id;

struct hdq_data {
	struct device		*dev;
	void __iomem		*hdq_base;
	/* lock status update */
	struct mutex		hdq_mutex;
	int			hdq_usecount;
	u8			hdq_irqstatus;
	/* device lock */
	spinlock_t		hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int			init_trans;
};

static int __devinit omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);

static struct platform_driver omap_hdq_driver = {
	.probe =	omap_hdq_probe,
	.remove =	omap_hdq_remove,
	.driver = {
		.name =	"omap_hdq",
	},
};

static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found);

static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
	.search		= omap_w1_search_bus,
};

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readl(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writel(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
		u8 val, u8 mask)
{
	u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writel(new_val, hdq_data->hdq_base + offset);

	return new_val;
}

/*
 * Wait for one or more bits in the flag to change.
 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * Return 0 on success and -ETIMEDOUT in the case of timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
		u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag to clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag to be set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}

/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for"
			" TXCOMPLETE/RXCOMPLETE, %x", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit to return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
			" return to zero, %x", tmp_status);
	}

out:
	return ret;
}

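/*
 * A read of OMAP_HDQ_INT_STATUS clears the interrupt bits in hardware, so
 * the handler latches the value into hdq_irqstatus under the spinlock and
 * wakes hdq_wait_queue when a completion or timeout bit is set.
 */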
/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

/* HDQ Mode: always return success */
static u8 omap_w1_reset_bus(void *_hdq)
{
	return 0;
}

/* W1 search callback function */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * HDQ might not truly obey the 1-wire spec, so calculate the CRC
	 * based on the module parameter.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}

static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and
	 * GO/INIT won't return to zero if the interrupt is disabled. So
	 * we always enable the interrupt.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
				tmp_status);
	else {
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}

/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
				tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}
	/*
	 * Wait for both INIT and GO bits to return to zero.
	 * Zero wait time expected for interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
			OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
			&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
			" return to zero, %x", tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

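/*
 * Read one byte from the bus: set DIR (receive) together with GO, wait for
 * the ISR to report RXCOMPLETE, then fetch the byte from the RX_DATA
 * register.
 */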
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 */
		wait_event_timeout(hdq_wait_queue,
				   (hdq_data->hdq_irqstatus
				    & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				   OMAP_HDQ_TIMEOUT);

		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for"
				" RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Enable clocks and set the controller to HDQ mode */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {

			pm_runtime_get_sync(hdq_data->dev);

			/* make sure HDQ is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back up the count */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ mode & enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					OMAP_HDQ_SYSCONFIG_AUTOIDLE);
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Disable clocks to the module */
static int omap_hdq_put(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0)
		return -EINTR;

	if (0 == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to decrement use count"
			" when it is zero");
		ret = -EINVAL;
	} else {
		hdq_data->hdq_usecount--;
		module_put(THIS_MODULE);
		if (0 == hdq_data->hdq_usecount)
			pm_runtime_put_sync(hdq_data->dev);
	}
	mutex_unlock(&hdq_data->hdq_mutex);

	return ret;
}

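/*
 * The init_trans counter brackets an HDQ transaction: the first byte written
 * (CMD | register address) calls omap_hdq_get() to power up the block, and
 * the read or second write that completes the transfer calls omap_hdq_put().
 */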
/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	ret = hdq_read_byte(hdq_data, &val);
	if (ret) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
		return -1;
	}

	/* Write followed by a read, release the module */
	if (hdq_data->init_trans) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
	}

	return val;
}

/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
		return;
	}

	/* Second write, data transferred. Release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}

	return;
}

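/*
 * Probe: map the HDQ register window, enable runtime PM, report the block
 * revision, hook up the interrupt, issue an initial break pulse and register
 * this master with the w1 core.
 */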
static int __devinit omap_hdq_probe(struct platform_device *pdev)
{
	struct hdq_data *hdq_data;
	struct resource *res;
	int ret, irq;
	u8 rev;

	hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		ret = -ENOMEM;
		goto err_kmalloc;
	}

	hdq_data->dev = &pdev->dev;
	platform_set_drvdata(pdev, hdq_data);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(&pdev->dev, "unable to get resource\n");
		ret = -ENXIO;
		goto err_resource;
	}

	hdq_data->hdq_base = ioremap(res->start, SZ_4K);
	if (!hdq_data->hdq_base) {
		dev_dbg(&pdev->dev, "ioremap failed\n");
		ret = -EINVAL;
		goto err_ioremap;
	}

	hdq_data->hdq_usecount = 0;
	mutex_init(&hdq_data->hdq_mutex);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto err_irq;
	}

	ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	omap_hdq_break(hdq_data);

	pm_runtime_put_sync(&pdev->dev);

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_irq:
	pm_runtime_put_sync(&pdev->dev);
err_w1:
	pm_runtime_disable(&pdev->dev);

	iounmap(hdq_data->hdq_base);

err_ioremap:
err_resource:
	platform_set_drvdata(pdev, NULL);
	kfree(hdq_data);

err_kmalloc:
	return ret;
}

static int omap_hdq_remove(struct platform_device *pdev)
{
	struct hdq_data *hdq_data = platform_get_drvdata(pdev);

	mutex_lock(&hdq_data->hdq_mutex);

	if (hdq_data->hdq_usecount) {
		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
		mutex_unlock(&hdq_data->hdq_mutex);
		return -EBUSY;
	}

	mutex_unlock(&hdq_data->hdq_mutex);

	/* remove module dependency */
	pm_runtime_disable(&pdev->dev);
	free_irq(platform_get_irq(pdev, 0), hdq_data);
	platform_set_drvdata(pdev, NULL);
	iounmap(hdq_data->hdq_base);
	kfree(hdq_data);

	return 0;
}

static int __init
omap_hdq_init(void)
{
	return platform_driver_register(&omap_hdq_driver);
}
module_init(omap_hdq_init);

static void __exit
omap_hdq_exit(void)
{
	platform_driver_unregister(&omap_hdq_driver);
}
module_exit(omap_hdq_exit);

module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ driver Library");
MODULE_LICENSE("GPL");