firewire: cdev: add ioctls for isochronous resource management
[deliverable/linux.git] drivers/firewire/fw-iso.c
/*
 * Isochronous I/O functionality:
 * - Isochronous DMA context management
 * - Isochronous bus resource management (channels, bandwidth), client side
 *
 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire-constants.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "fw-topology.h"
#include "fw-transaction.h"
/*
 * Isochronous DMA context management
 */

int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
                       int page_count, enum dma_data_direction direction)
{
        int i, j;
        dma_addr_t address;

        buffer->page_count = page_count;
        buffer->direction = direction;

        buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
                                GFP_KERNEL);
        if (buffer->pages == NULL)
                goto out;

        for (i = 0; i < buffer->page_count; i++) {
                buffer->pages[i] =
                        alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
                if (buffer->pages[i] == NULL)
                        goto out_pages;

                address = dma_map_page(card->device, buffer->pages[i],
                                       0, PAGE_SIZE, direction);
                if (dma_mapping_error(card->device, address)) {
                        __free_page(buffer->pages[i]);
                        goto out_pages;
                }
                set_page_private(buffer->pages[i], address);
        }

        return 0;

 out_pages:
        for (j = 0; j < i; j++) {
                address = page_private(buffer->pages[j]);
                /* Unmap with the same direction the page was mapped with. */
                dma_unmap_page(card->device, address,
                               PAGE_SIZE, direction);
                __free_page(buffer->pages[j]);
        }
        kfree(buffer->pages);
 out:
        buffer->pages = NULL;
        return -ENOMEM;
}

int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
{
        unsigned long uaddr;
        int i, ret;

        uaddr = vma->vm_start;
        for (i = 0; i < buffer->page_count; i++) {
                ret = vm_insert_page(vma, uaddr, buffer->pages[i]);
                if (ret)
                        return ret;
                uaddr += PAGE_SIZE;
        }

        return 0;
}
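
/*
 * Illustration only, not part of the original file: a minimal sketch of
 * how a character-device driver might call fw_iso_buffer_map() from its
 * mmap file operation.  "struct example_client" and example_mmap() are
 * hypothetical names, not the actual fw-cdev implementation.
 */
struct example_client {
        struct fw_iso_buffer buffer;    /* assumed already initialized */
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct example_client *client = file->private_data;

        /* The whole buffer must fit into the requested mapping. */
        if (vma->vm_end - vma->vm_start >
            (unsigned long)client->buffer.page_count << PAGE_SHIFT)
                return -EINVAL;

        /* Insert each page of the iso buffer into the client's VMA. */
        return fw_iso_buffer_map(&client->buffer, vma);
}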

void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
                           struct fw_card *card)
{
        int i;
        dma_addr_t address;

        for (i = 0; i < buffer->page_count; i++) {
                address = page_private(buffer->pages[i]);
                /* Unmap with the direction the buffer was mapped with. */
                dma_unmap_page(card->device, address,
                               PAGE_SIZE, buffer->direction);
                __free_page(buffer->pages[i]);
        }

        kfree(buffer->pages);
        buffer->pages = NULL;
}
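
/*
 * Illustration only, not part of the original file: the typical
 * lifecycle of an fw_iso_buffer.  example_setup_buffer() is a
 * hypothetical caller; the page count of 16 is arbitrary.
 */
static int example_setup_buffer(struct fw_card *card,
                                struct fw_iso_buffer *buffer)
{
        int ret;

        /* Map 16 pages for DMA from the controller to memory (receive). */
        ret = fw_iso_buffer_init(buffer, card, 16, DMA_FROM_DEVICE);
        if (ret < 0)
                return ret;

        /* ... queue packets into the buffer, run the iso context ... */

        /* On teardown, unmap and free all pages again. */
        fw_iso_buffer_destroy(buffer, card);

        return 0;
}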

struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
                int type, int channel, int speed, size_t header_size,
                fw_iso_callback_t callback, void *callback_data)
{
        struct fw_iso_context *ctx;

        ctx = card->driver->allocate_iso_context(card,
                                                 type, channel, header_size);
        if (IS_ERR(ctx))
                return ctx;

        ctx->card = card;
        ctx->type = type;
        ctx->channel = channel;
        ctx->speed = speed;
        ctx->header_size = header_size;
        ctx->callback = callback;
        ctx->callback_data = callback_data;

        return ctx;
}

void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
        struct fw_card *card = ctx->card;

        card->driver->free_iso_context(ctx);
}

int fw_iso_context_start(struct fw_iso_context *ctx,
                         int cycle, int sync, int tags)
{
        return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}

int fw_iso_context_queue(struct fw_iso_context *ctx,
                         struct fw_iso_packet *packet,
                         struct fw_iso_buffer *buffer,
                         unsigned long payload)
{
        struct fw_card *card = ctx->card;

        return card->driver->queue_iso(ctx, packet, buffer, payload);
}

int fw_iso_context_stop(struct fw_iso_context *ctx)
{
        return ctx->card->driver->stop_iso(ctx);
}
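
/*
 * Illustration only, not part of the original file: the usual call
 * sequence for the context API above, sketched for a receive context.
 * All example_* names are hypothetical; FW_ISO_CONTEXT_RECEIVE,
 * FW_ISO_CONTEXT_MATCH_ALL_TAGS and SCODE_400 come from the firewire
 * headers.  A real caller would also map a buffer and queue packets.
 */
static void example_rx_callback(struct fw_iso_context *ctx, u32 cycle,
                                size_t header_length, void *header,
                                void *data)
{
        /* Runs per interrupt packet: parse headers, requeue buffers. */
}

static int example_run_context(struct fw_card *card, int channel)
{
        struct fw_iso_context *ctx;
        int ret;

        ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE, channel,
                                    SCODE_400, 4, example_rx_callback, NULL);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        /* ... fw_iso_context_queue() packets into a mapped buffer ... */

        /* Start at the next cycle, accept packets with any tag value. */
        ret = fw_iso_context_start(ctx, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS);
        if (ret < 0)
                goto out;

        /* ... stream runs, example_rx_callback() fires ... */

        ret = fw_iso_context_stop(ctx);
 out:
        fw_iso_context_destroy(ctx);
        return ret;
}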

/*
 * Isochronous bus resource management (channels, bandwidth), client side
 */

static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
                            int bandwidth, bool allocate)
{
        __be32 data[2];
        int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;

        /*
         * On a 1394a IRM with low contention, try < 1 is enough.
         * On a 1394-1995 IRM, we need at least try < 2.
         * Let's just do try < 5.
         */
        for (try = 0; try < 5; try++) {
                new = allocate ? old - bandwidth : old + bandwidth;
                if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
                        break;

                data[0] = cpu_to_be32(old);
                data[1] = cpu_to_be32(new);
                switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                irm_id, generation, SCODE_100,
                                CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
                                data, sizeof(data))) {
                case RCODE_GENERATION:
                        /* A generation change frees all bandwidth. */
                        return allocate ? -EAGAIN : bandwidth;

                case RCODE_COMPLETE:
                        if (be32_to_cpup(data) == old)
                                return bandwidth;

                        old = be32_to_cpup(data);
                        /* Fall through. */
                }
        }

        return -EIO;
}
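
/*
 * Worked example, not part of the original file, of the lock loop
 * above; it assumes an allocation of 800 bandwidth units on an
 * otherwise idle bus (BANDWIDTH_AVAILABLE_INITIAL is 4915 units):
 *
 *   old      = BANDWIDTH_AVAILABLE_INITIAL   = 4915
 *   new      = old - 800                     = 4115
 *   lock req = compare_swap(arg 4915, data 4115)
 *
 * If the register still held 4915, the IRM stores 4115 and returns
 * 4915; be32_to_cpup(data) == old, and 800 is returned as allocated.
 * If another node allocated first and the register held, say, 3000,
 * the IRM returns 3000 without swapping; the loop retries with
 * old = 3000, new = 2200.
 */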

static int manage_channel(struct fw_card *card, int irm_id, int generation,
                          __be32 channels_mask, u64 offset, bool allocate)
{
        __be32 data[2], c, old = allocate ? cpu_to_be32(~0) : 0;
        int i, retry = 5;

        for (i = 0; i < 32; i++) {
                c = cpu_to_be32(1 << (31 - i));
                if (!(channels_mask & c))
                        continue;

                /*
                 * A set bit in the register means the channel is free;
                 * skip channels already in the desired state.
                 */
                if (allocate == !(old & c))
                        continue;

                data[0] = old;
                data[1] = old ^ c;
                switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                           irm_id, generation, SCODE_100,
                                           offset, data, sizeof(data))) {
                case RCODE_GENERATION:
                        /* A generation change frees all channels. */
                        return allocate ? -EAGAIN : i;

                case RCODE_COMPLETE:
                        if (data[0] == old)
                                return i;

                        old = data[0];

                        /* Is the IRM 1394a-2000 compliant? */
                        if ((data[0] & c) == (data[1] & c))
                                continue;

                        /* 1394-1995 IRM, fall through to retry. */
                default:
                        if (retry--)
                                i--;
                }
        }

        return -EIO;
}
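
/*
 * Worked example, not part of the original file: allocating out of
 * channels_mask = 0xc0000000 (channels 0 and 1 acceptable) while the
 * IRM's CHANNELS_AVAILABLE_HI register holds 0x7fffffff, i.e. channel
 * 0 is already taken:
 *
 *   i = 0:  c = 0x80000000, old = 0xffffffff;
 *           lock(arg 0xffffffff, data 0x7fffffff) fails and returns
 *           0x7fffffff.  data[0] != old, so old is updated; bit c is
 *           already clear in the returned value (channel taken), so
 *           the loop moves on to the next channel.
 *   i = 1:  c = 0x40000000;
 *           lock(arg 0x7fffffff, data 0x3fffffff) succeeds, and
 *           channel 1 is returned.
 */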

static void deallocate_channel(struct fw_card *card, int irm_id,
                               int generation, int channel)
{
        __be32 mask;
        u64 offset;

        mask = channel < 32 ? cpu_to_be32(1 << (31 - channel)) :
                              cpu_to_be32(1 << (63 - channel));
        offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
                                CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;

        manage_channel(card, irm_id, generation, mask, offset, false);
}

/**
 * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
 *
 * In parameters: card, generation, channels_mask, bandwidth, allocate
 * Out parameters: channel, bandwidth
 * This function blocks (sleeps) during communication with the IRM.
 * Allocates or deallocates at most one channel out of channels_mask.
 *
 * Returns channel < 0 if no channel was allocated or deallocated.
 * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
 *
 * If generation is stale, deallocations succeed but allocations fail with
 * channel = -EAGAIN.
 *
 * If channel (de)allocation fails, bandwidth (de)allocation fails too.
 * If bandwidth allocation fails, no channel will be allocated either.
 * If bandwidth deallocation fails, channel deallocation may still have been
 * successful.
 */
void fw_iso_resource_manage(struct fw_card *card, int generation,
                            u64 channels_mask, int *channel, int *bandwidth,
                            bool allocate)
{
        __be32 channels_hi = cpu_to_be32(channels_mask >> 32);
        __be32 channels_lo = cpu_to_be32(channels_mask);
        int irm_id, ret, c = -EINVAL;

        spin_lock_irq(&card->lock);
        irm_id = card->irm_node->node_id;
        spin_unlock_irq(&card->lock);

        if (channels_hi)
                c = manage_channel(card, irm_id, generation, channels_hi,
                    CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
        if (channels_lo && c < 0) {
                c = manage_channel(card, irm_id, generation, channels_lo,
                    CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
                if (c >= 0)
                        c += 32;
        }
        *channel = c;

        if (channels_mask != 0 && c < 0)
                *bandwidth = 0;

        if (*bandwidth == 0)
                return;

        ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
        if (ret < 0)
                *bandwidth = 0;

        if (ret < 0 && c >= 0 && allocate) {
                deallocate_channel(card, irm_id, generation, c);
                *channel = ret;
        }
}
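
/*
 * Illustration only, not part of the original file: a hypothetical
 * caller asking the IRM for any one of channels 0-7 plus 800 bandwidth
 * units.  In this version of the interface the mask is MSB-first, as
 * the channels_hi/channels_lo split above shows: bit 63 requests
 * channel 0, so bits 63..56 cover channels 0..7.
 */
static int example_allocate_resources(struct fw_card *card, int generation)
{
        int channel, bandwidth = 800;

        fw_iso_resource_manage(card, generation, 0xff00000000000000ULL,
                               &channel, &bandwidth, true);

        if (channel == -EAGAIN)
                return -EAGAIN; /* bus reset; retry with the new generation */
        if (channel < 0 || bandwidth == 0)
                return -EBUSY;  /* no free channel or not enough bandwidth */

        return channel;         /* both resources are now held */
}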