Commit | Line | Data |
---|---|---|
77c9a5da SR |
1 | #ifndef _LINUX_FIREWIRE_H |
2 | #define _LINUX_FIREWIRE_H | |
3 | ||
4 | #include <linux/completion.h> | |
c76acec6 | 5 | #include <linux/dma-mapping.h> |
77c9a5da SR |
6 | #include <linux/kernel.h> |
7 | #include <linux/kref.h> | |
8 | #include <linux/list.h> | |
9 | #include <linux/mutex.h> | |
10 | #include <linux/spinlock.h> | |
11 | #include <linux/sysfs.h> | |
12 | #include <linux/timer.h> | |
13 | #include <linux/types.h> | |
14 | #include <linux/workqueue.h> | |
15 | ||
60063497 | 16 | #include <linux/atomic.h> |
77c9a5da SR |
17 | #include <asm/byteorder.h> |
18 | ||
77c9a5da SR |
#define CSR_REGISTER_BASE		0xfffff0000000ULL

/* register offsets are relative to CSR_REGISTER_BASE */
#define CSR_STATE_CLEAR			0x0
#define CSR_STATE_SET			0x4
#define CSR_NODE_IDS			0x8
#define CSR_RESET_START			0xc
#define CSR_SPLIT_TIMEOUT_HI		0x18
#define CSR_SPLIT_TIMEOUT_LO		0x1c
#define CSR_CYCLE_TIME			0x200
#define CSR_BUS_TIME			0x204
#define CSR_BUSY_TIMEOUT		0x210
#define CSR_PRIORITY_BUDGET		0x218
#define CSR_BUS_MANAGER_ID		0x21c
#define CSR_BANDWIDTH_AVAILABLE		0x220
#define CSR_CHANNELS_AVAILABLE		0x224
/* _HI deliberately shares the offset of CHANNELS_AVAILABLE (hi/lo pair) */
#define CSR_CHANNELS_AVAILABLE_HI	0x224
#define CSR_CHANNELS_AVAILABLE_LO	0x228
#define CSR_MAINT_UTILITY		0x230
#define CSR_BROADCAST_CHANNEL		0x234
#define CSR_CONFIG_ROM			0x400
#define CSR_CONFIG_ROM_END		0x800
/* plug control registers, per-plug index i */
#define CSR_OMPR			0x900
#define CSR_OPCR(i)			(0x904 + (i) * 4)
#define CSR_IMPR			0x980
#define CSR_IPCR(i)			(0x984 + (i) * 4)
#define CSR_FCP_COMMAND			0xB00
#define CSR_FCP_RESPONSE		0xD00
#define CSR_FCP_END			0xF00
#define CSR_TOPOLOGY_MAP		0x1000
#define CSR_TOPOLOGY_MAP_END		0x1400
#define CSR_SPEED_MAP			0x2000
#define CSR_SPEED_MAP_END		0x3000

/* config ROM directory entry types (upper bits of a directory entry) */
#define CSR_OFFSET			0x40
#define CSR_LEAF			0x80
#define CSR_DIRECTORY			0xc0

/* config ROM directory entry keys */
#define CSR_DESCRIPTOR			0x01
#define CSR_VENDOR			0x03
#define CSR_HARDWARE_VERSION		0x04
#define CSR_UNIT			0x11
#define CSR_SPECIFIER_ID		0x12
#define CSR_VERSION			0x13
#define CSR_DEPENDENT_INFO		0x14
#define CSR_MODEL			0x17
#define CSR_DIRECTORY_ID		0x20
313162d0 PG |
struct device;

/*
 * Iterator state for walking the key/value entries of a config-ROM
 * directory; initialized by fw_csr_iterator_init(), advanced by
 * fw_csr_iterator_next().
 */
struct fw_csr_iterator {
	const u32 *p;		/* next quadlet to visit */
	const u32 *end;		/* one past the last quadlet */
};

void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
1f8fef7b | 77 | |
77c9a5da SR |
extern struct bus_type fw_bus_type;

struct fw_card_driver;
struct fw_node;

/*
 * Per-controller state of the FireWire core: one instance per link layer
 * controller (card).
 */
struct fw_card {
	const struct fw_card_driver *driver;
	struct device *device;
	struct kref kref;
	struct completion done;

	int node_id;		/* local node ID in the current generation */
	int generation;		/* current bus generation */
	int current_tlabel;
	u64 tlabel_mask;	/* bitmask of transaction labels in use */
	struct list_head transaction_list;
	u64 reset_jiffies;

	u32 split_timeout_hi;
	u32 split_timeout_lo;
	unsigned int split_timeout_cycles;
	unsigned int split_timeout_jiffies;

	unsigned long long guid;
	unsigned max_receive;
	int link_speed;
	int config_rom_generation;

	spinlock_t lock; /* Take this lock when handling the lists in
			  * this struct. */
	struct fw_node *local_node;
	struct fw_node *root_node;
	struct fw_node *irm_node;
	u8 color; /* must be u8 to match the definition in struct fw_node */
	int gap_count;
	bool beta_repeaters_present;

	int index;
	struct list_head link;

	struct list_head phy_receiver_list;

	struct delayed_work br_work; /* bus reset job */
	bool br_short;

	struct delayed_work bm_work; /* bus manager job */
	int bm_retries;
	int bm_generation;
	int bm_node_id;
	bool bm_abdicate;

	bool priority_budget_implemented;	/* controller feature */
	bool broadcast_channel_auto_allocated;	/* controller feature */

	bool broadcast_channel_allocated;
	u32 broadcast_channel;
	/* one quadlet per register in the CSR TOPOLOGY_MAP range */
	__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];

	__be32 maint_utility_register;
};
138 | ||
77c9a5da SR |
/* Sysfs attribute group storage embedded in fw_device/fw_unit. */
struct fw_attribute_group {
	struct attribute_group *groups[2];
	struct attribute_group group;
	struct attribute *attrs[12];
};

/* Lifecycle states stored in fw_device.state (read atomically). */
enum fw_device_state {
	FW_DEVICE_INITIALIZING,
	FW_DEVICE_RUNNING,
	FW_DEVICE_GONE,
	FW_DEVICE_SHUTDOWN,
};
151 | ||
/*
 * Note, fw_device.generation always has to be read before fw_device.node_id.
 * Use SMP memory barriers to ensure this.  Otherwise requests will be sent
 * to an outdated node_id if the generation was updated in the meantime due
 * to a bus reset.
 *
 * Likewise, fw-core will take care to update .node_id before .generation so
 * that whenever fw_device.generation is current WRT the actual bus generation,
 * fw_device.node_id is guaranteed to be current too.
 *
 * The same applies to fw_device.card->node_id vs. fw_device.generation.
 *
 * fw_device.config_rom and fw_device.config_rom_length may be accessed during
 * the lifetime of any fw_unit belonging to the fw_device, before device_del()
 * was called on the last fw_unit.  Alternatively, they may be accessed while
 * holding fw_device_rwsem.
 */
struct fw_device {
	atomic_t state;		/* enum fw_device_state */
	struct fw_node *node;
	int node_id;
	int generation;
	unsigned max_speed;
	struct fw_card *card;
	struct device device;

	struct mutex client_list_mutex;
	struct list_head client_list;

	const u32 *config_rom;
	size_t config_rom_length;
	int config_rom_retries;
	unsigned is_local:1;
	unsigned max_rec:4;
	unsigned cmc:1;
	unsigned irmc:1;
	unsigned bc_implemented:2;

	struct delayed_work work;
	struct fw_attribute_group attribute_group;
};
193 | ||
/* Obtain the fw_device embedding the given generic struct device. */
static inline struct fw_device *fw_device(struct device *dev)
{
	return container_of(dev, struct fw_device, device);
}

/* Nonzero once the device has reached FW_DEVICE_SHUTDOWN. */
static inline int fw_device_is_shutdown(struct fw_device *device)
{
	return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
}
203 | ||
77c9a5da SR |
204 | int fw_device_enable_phys_dma(struct fw_device *device); |
205 | ||
206 | /* | |
207 | * fw_unit.directory must not be accessed after device_del(&fw_unit.device). | |
208 | */ | |
209 | struct fw_unit { | |
210 | struct device device; | |
13b302d0 | 211 | const u32 *directory; |
77c9a5da SR |
212 | struct fw_attribute_group attribute_group; |
213 | }; | |
214 | ||
/* Obtain the fw_unit embedding the given generic struct device. */
static inline struct fw_unit *fw_unit(struct device *dev)
{
	return container_of(dev, struct fw_unit, device);
}

/* Take a reference on the unit's embedded device; returns the unit. */
static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
{
	get_device(&unit->device);

	return unit;
}

/* Drop a reference previously taken with fw_unit_get(). */
static inline void fw_unit_put(struct fw_unit *unit)
{
	put_device(&unit->device);
}

/* The parent device of an fw_unit is the fw_device it belongs to. */
static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
{
	return fw_device(unit->device.parent);
}
236 | ||
77c9a5da SR |
237 | struct ieee1394_device_id; |
238 | ||
239 | struct fw_driver { | |
240 | struct device_driver driver; | |
241 | /* Called when the parent device sits through a bus reset. */ | |
242 | void (*update)(struct fw_unit *unit); | |
243 | const struct ieee1394_device_id *id_table; | |
244 | }; | |
245 | ||
246 | struct fw_packet; | |
247 | struct fw_request; | |
248 | ||
249 | typedef void (*fw_packet_callback_t)(struct fw_packet *packet, | |
250 | struct fw_card *card, int status); | |
251 | typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, | |
252 | void *data, size_t length, | |
253 | void *callback_data); | |
254 | /* | |
db5d247a CL |
255 | * Important note: Except for the FCP registers, the callback must guarantee |
256 | * that either fw_send_response() or kfree() is called on the @request. | |
77c9a5da SR |
257 | */ |
258 | typedef void (*fw_address_callback_t)(struct fw_card *card, | |
259 | struct fw_request *request, | |
260 | int tcode, int destination, int source, | |
33e553fe | 261 | int generation, |
77c9a5da SR |
262 | unsigned long long offset, |
263 | void *data, size_t length, | |
264 | void *callback_data); | |
265 | ||
/* An asynchronous packet queued for transmission. */
struct fw_packet {
	int speed;
	int generation;
	u32 header[4];
	size_t header_length;
	void *payload;
	size_t payload_length;
	dma_addr_t payload_bus;
	bool payload_mapped;	/* true if payload_bus holds a DMA mapping */
	u32 timestamp;

	/*
	 * This callback is called when the packet transmission has completed.
	 * For successful transmission, the status code is the ack received
	 * from the destination.  Otherwise it is one of the juju-specific
	 * rcodes:  RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK.
	 * The callback can be called from tasklet context and thus
	 * must never block.
	 */
	fw_packet_callback_t callback;
	int ack;
	struct list_head link;
	void *driver_data;
};
290 | ||
/* An outbound request/response transaction tracked by the core. */
struct fw_transaction {
	int node_id; /* The generation is implied; it is always the current. */
	int tlabel;
	struct list_head link;
	struct fw_card *card;
	bool is_split_transaction;
	struct timer_list split_timeout_timer;

	struct fw_packet packet;

	/*
	 * The data passed to the callback is valid only during the
	 * callback.
	 */
	fw_transaction_callback_t callback;
	void *callback_data;
};
308 | ||
/* A handler registered for inbound requests to an address range. */
struct fw_address_handler {
	u64 offset;		/* start address actually allocated */
	size_t length;
	fw_address_callback_t address_callback;
	void *callback_data;
	struct list_head link;
};

/* Half-open address range [start, end) in 1394 address space. */
struct fw_address_region {
	u64 start;
	u64 end;
};

extern const struct fw_address_region fw_high_memory_region;

int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region);
void fw_core_remove_address_handler(struct fw_address_handler *handler);
void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode);
void fw_send_request(struct fw_card *card, struct fw_transaction *t,
		     int tcode, int destination_id, int generation, int speed,
		     unsigned long long offset, void *payload, size_t length,
		     fw_transaction_callback_t callback, void *callback_data);
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction);
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length);
338 | ||
c76acec6 JF |
339 | static inline int fw_stream_packet_destination_id(int tag, int channel, int sy) |
340 | { | |
341 | return tag << 14 | channel << 8 | sy; | |
342 | } | |
343 | ||
/*
 * A block of extra config-ROM data registered with fw_core_add_descriptor()
 * and removed again with fw_core_remove_descriptor().
 */
struct fw_descriptor {
	struct list_head link;
	size_t length;		/* number of quadlets in @data */
	u32 immediate;
	u32 key;
	const u32 *data;
};

int fw_core_add_descriptor(struct fw_descriptor *desc);
void fw_core_remove_descriptor(struct fw_descriptor *desc);
354 | ||
355 | /* | |
356 | * The iso packet format allows for an immediate header/payload part | |
357 | * stored in 'header' immediately after the packet info plus an | |
358 | * indirect payload part that is pointer to by the 'payload' field. | |
359 | * Applications can use one or the other or both to implement simple | |
360 | * low-bandwidth streaming (e.g. audio) or more advanced | |
361 | * scatter-gather streaming (e.g. assembling video frame automatically). | |
362 | */ | |
363 | struct fw_iso_packet { | |
872e330e SR |
364 | u16 payload_length; /* Length of indirect payload */ |
365 | u32 interrupt:1; /* Generate interrupt on this packet */ | |
366 | u32 skip:1; /* tx: Set to not send packet at all */ | |
367 | /* rx: Sync bit, wait for matching sy */ | |
368 | u32 tag:2; /* tx: Tag in packet header */ | |
369 | u32 sy:4; /* tx: Sy in packet header */ | |
370 | u32 header_length:8; /* Length of immediate header */ | |
371 | u32 header[0]; /* tx: Top of 1394 isoch. data_block */ | |
c76acec6 JF |
372 | }; |
373 | ||
872e330e SR |
/* Context types for fw_iso_context_create() / fw_iso_context.type */
#define FW_ISO_CONTEXT_TRANSMIT			0
#define FW_ISO_CONTEXT_RECEIVE			1
#define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL	2

/* Tag match bits for fw_iso_context_start()'s @tags argument */
#define FW_ISO_CONTEXT_MATCH_TAG0	 1
#define FW_ISO_CONTEXT_MATCH_TAG1	 2
#define FW_ISO_CONTEXT_MATCH_TAG2	 4
#define FW_ISO_CONTEXT_MATCH_TAG3	 8
#define FW_ISO_CONTEXT_MATCH_ALL_TAGS	15

/*
 * An iso buffer is just a set of pages mapped for DMA in the
 * specified direction.  Since the pages are to be used for DMA, they
 * are not mapped into the kernel virtual address space.  We store the
 * DMA address in the page private. The helper function
 * fw_iso_buffer_map() will map the pages into a given vma.
 */
struct fw_iso_buffer {
	enum dma_data_direction direction;
	struct page **pages;
	int page_count;
};

int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
		       int page_count, enum dma_data_direction direction);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed);
c76acec6 JF |
401 | |
402 | struct fw_iso_context; | |
403 | typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, | |
404 | u32 cycle, size_t header_length, | |
405 | void *header, void *data); | |
872e330e SR |
406 | typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context, |
407 | dma_addr_t completed, void *data); | |
c76acec6 JF |
408 | struct fw_iso_context { |
409 | struct fw_card *card; | |
410 | int type; | |
411 | int channel; | |
412 | int speed; | |
413 | size_t header_size; | |
872e330e SR |
414 | union { |
415 | fw_iso_callback_t sc; | |
416 | fw_iso_mc_callback_t mc; | |
417 | } callback; | |
c76acec6 JF |
418 | void *callback_data; |
419 | }; | |
420 | ||
421 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, | |
422 | int type, int channel, int speed, size_t header_size, | |
423 | fw_iso_callback_t callback, void *callback_data); | |
872e330e | 424 | int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels); |
c76acec6 JF |
425 | int fw_iso_context_queue(struct fw_iso_context *ctx, |
426 | struct fw_iso_packet *packet, | |
427 | struct fw_iso_buffer *buffer, | |
428 | unsigned long payload); | |
13882a82 | 429 | void fw_iso_context_queue_flush(struct fw_iso_context *ctx); |
d1bbd209 | 430 | int fw_iso_context_flush_completions(struct fw_iso_context *ctx); |
c76acec6 JF |
431 | int fw_iso_context_start(struct fw_iso_context *ctx, |
432 | int cycle, int sync, int tags); | |
433 | int fw_iso_context_stop(struct fw_iso_context *ctx); | |
434 | void fw_iso_context_destroy(struct fw_iso_context *ctx); | |
31ef9134 CL |
435 | void fw_iso_resource_manage(struct fw_card *card, int generation, |
436 | u64 channels_mask, int *channel, int *bandwidth, | |
f30e6d3e | 437 | bool allocate); |
c76acec6 | 438 | |
105e53f8 SR |
439 | extern struct workqueue_struct *fw_workqueue; |
440 | ||
77c9a5da | 441 | #endif /* _LINUX_FIREWIRE_H */ |