drivers/net/wireless/rt2x00/rt2x00mmio.c
/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00mmio
	Abstract: rt2x00 generic mmio device routines.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"

/*
 * Register access.
 */
int rt2x00mmio_regbusy_read(struct rt2x00_dev *rt2x00dev,
			    const unsigned int offset,
			    const struct rt2x00_field32 field,
			    u32 *reg)
{
	unsigned int i;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return 0;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2x00mmio_register_read(rt2x00dev, offset, reg);
		if (!rt2x00_get_field32(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	printk_once(KERN_ERR "%s() Indirect register access failed: "
		    "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
	*reg = ~0;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mmio_regbusy_read);

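/*
 * Process completed RX entries: at most 15 frames are handled per call
 * (the max_rx budget below). Returns true when the budget was exhausted,
 * i.e. more frames may still be waiting in the queue.
 */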
bool rt2x00mmio_rxdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->rx;
	struct queue_entry *entry;
	struct queue_entry_priv_mmio *entry_priv;
	struct skb_frame_desc *skbdesc;
	int max_rx = 16;

	while (--max_rx) {
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		entry_priv = entry->priv_data;

		if (rt2x00dev->ops->lib->get_entry_state(entry))
			break;

		/*
		 * Fill in desc fields of the skb descriptor
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		skbdesc->desc = entry_priv->desc;
		skbdesc->desc_len = entry->queue->desc_size;

		/*
		 * DMA is already done, notify rt2x00lib that
		 * it finished successfully.
		 */
		rt2x00lib_dmastart(entry);
		rt2x00lib_dmadone(entry);

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(entry, GFP_ATOMIC);
	}

	return !max_rx;
}
EXPORT_SYMBOL_GPL(rt2x00mmio_rxdone);

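/*
 * Wait for the queue to drain: poll rt2x00queue_empty() in 10 ms steps,
 * giving up after roughly 100 ms.
 */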
void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop)
{
	unsigned int i;

	for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
		msleep(10);
}
EXPORT_SYMBOL_GPL(rt2x00mmio_flush_queue);

/*
 * Device initialization handlers.
 */
static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
				      struct data_queue *queue)
{
	struct queue_entry_priv_mmio *entry_priv;
	void *addr;
	dma_addr_t dma;
	unsigned int i;

	/*
	 * Allocate DMA memory for descriptor and buffer.
	 */
	addr = dma_alloc_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size,
				  &dma, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	memset(addr, 0, queue->limit * queue->desc_size);

	/*
	 * Initialize all queue entries to contain valid addresses.
	 */
	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->desc = addr + i * queue->desc_size;
		entry_priv->desc_dma = dma + i * queue->desc_size;
	}

	return 0;
}

static void rt2x00mmio_free_queue_dma(struct rt2x00_dev *rt2x00dev,
				      struct data_queue *queue)
{
	struct queue_entry_priv_mmio *entry_priv =
	    queue->entries[0].priv_data;

	if (entry_priv->desc)
		dma_free_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size,
				  entry_priv->desc, entry_priv->desc_dma);
	entry_priv->desc = NULL;
}

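/*
 * Allocate descriptor DMA memory for every data queue and register the
 * driver's interrupt handler; on any failure all queue DMA is released
 * again before returning the error.
 */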
int rt2x00mmio_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	/*
	 * Allocate DMA
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00mmio_alloc_queue_dma(rt2x00dev, queue);
		if (status)
			goto exit;
	}

	/*
	 * Register interrupt handler.
	 */
	status = request_irq(rt2x00dev->irq,
			     rt2x00dev->ops->lib->irq_handler,
			     IRQF_SHARED, rt2x00dev->name, rt2x00dev);
	if (status) {
		rt2x00_err(rt2x00dev, "IRQ %d allocation failed (error %d)\n",
			   rt2x00dev->irq, status);
		goto exit;
	}

	return 0;

exit:
	queue_for_each(rt2x00dev, queue)
		rt2x00mmio_free_queue_dma(rt2x00dev, queue);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00mmio_initialize);

void rt2x00mmio_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * Free irq line.
	 */
	free_irq(rt2x00dev->irq, rt2x00dev);

	/*
	 * Free DMA
	 */
	queue_for_each(rt2x00dev, queue)
		rt2x00mmio_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00mmio_uninitialize);

/*
 * rt2x00mmio module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 mmio library");
MODULE_LICENSE("GPL");