/*
 * include/linux/firewire.h
 *
 * From commit 77c9a5da: "firewire: core: prepare for non-core children
 * of card devices".  (Web-scrape page header replaced with this comment.)
 */
1#ifndef _LINUX_FIREWIRE_H
2#define _LINUX_FIREWIRE_H
3
4#include <linux/completion.h>
5#include <linux/device.h>
6#include <linux/kernel.h>
7#include <linux/kref.h>
8#include <linux/list.h>
9#include <linux/mutex.h>
10#include <linux/spinlock.h>
11#include <linux/sysfs.h>
12#include <linux/timer.h>
13#include <linux/types.h>
14#include <linux/workqueue.h>
15
16#include <asm/atomic.h>
17#include <asm/byteorder.h>
18
/* Logging helpers: printk at NOTICE/ERR level, prefixed with the module name. */
#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
21
22static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
23{
24 u32 *dst = _dst;
25 __be32 *src = _src;
26 int i;
27
28 for (i = 0; i < size / 4; i++)
29 dst[i] = be32_to_cpu(src[i]);
30}
31
/*
 * Copy @size bytes from @_src to @_dst, converting each 32-bit quadlet
 * from CPU to big-endian byte order.  A 32-bit byte swap is its own
 * inverse, so the from_be32 routine performs the identical transformation.
 */
static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
{
	fw_memcpy_from_be32(_dst, _src, size);
}
/* Base address of the IEEE 1212 control and status register (CSR) space. */
#define CSR_REGISTER_BASE 0xfffff0000000ULL

/* register offsets are relative to CSR_REGISTER_BASE */
#define CSR_STATE_CLEAR 0x0
#define CSR_STATE_SET 0x4
#define CSR_NODE_IDS 0x8
#define CSR_RESET_START 0xc
#define CSR_SPLIT_TIMEOUT_HI 0x18
#define CSR_SPLIT_TIMEOUT_LO 0x1c
#define CSR_CYCLE_TIME 0x200
#define CSR_BUS_TIME 0x204
#define CSR_BUSY_TIMEOUT 0x210
#define CSR_BUS_MANAGER_ID 0x21c
#define CSR_BANDWIDTH_AVAILABLE 0x220
#define CSR_CHANNELS_AVAILABLE 0x224
/* _HI/_LO alias the two halves of the 64-bit CHANNELS_AVAILABLE register. */
#define CSR_CHANNELS_AVAILABLE_HI 0x224
#define CSR_CHANNELS_AVAILABLE_LO 0x228
#define CSR_BROADCAST_CHANNEL 0x234
/* Configuration ROM occupies 1 kB starting at offset 0x400. */
#define CSR_CONFIG_ROM 0x400
#define CSR_CONFIG_ROM_END 0x800
#define CSR_FCP_COMMAND 0xB00
#define CSR_FCP_RESPONSE 0xD00
#define CSR_FCP_END 0xF00
#define CSR_TOPOLOGY_MAP 0x1000
#define CSR_TOPOLOGY_MAP_END 0x1400
#define CSR_SPEED_MAP 0x2000
#define CSR_SPEED_MAP_END 0x3000
63
/* Config ROM entry type bits (upper bits of a directory entry's key byte). */
#define CSR_OFFSET 0x40
#define CSR_LEAF 0x80
#define CSR_DIRECTORY 0xc0

/* Config ROM directory key values (IEEE 1212). */
#define CSR_DESCRIPTOR 0x01
#define CSR_VENDOR 0x03
#define CSR_HARDWARE_VERSION 0x04
#define CSR_NODE_CAPABILITIES 0x0c
#define CSR_UNIT 0x11
#define CSR_SPECIFIER_ID 0x12
#define CSR_VERSION 0x13
#define CSR_DEPENDENT_INFO 0x14
#define CSR_MODEL 0x17
#define CSR_INSTANCE 0x18
#define CSR_DIRECTORY_ID 0x20
79
/*
 * Cursor for walking the key/value quadlets of a config ROM directory.
 * Initialize with fw_csr_iterator_init(), then call fw_csr_iterator_next()
 * until it returns 0.
 */
struct fw_csr_iterator {
	u32 *p;		/* next quadlet to read */
	u32 *end;	/* one past the last quadlet */
};

void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
87
/* The FireWire bus type, registered by the core; devices/drivers attach here. */
extern struct bus_type fw_bus_type;

/* Opaque to users of this header; defined in the core. */
struct fw_card_driver;
struct fw_node;
92
/*
 * Per-controller state of the FireWire core.  Reference counted via @kref;
 * take/drop references with fw_card_get()/fw_card_put().
 */
struct fw_card {
	const struct fw_card_driver *driver;	/* low-level (OHCI) driver ops */
	struct device *device;			/* the controller's device */
	struct kref kref;
	struct completion done;

	/* Local node address; valid only for the current bus @generation. */
	int node_id;
	int generation;
	/* Transaction label allocation state — presumably a bitmask of
	 * in-flight tlabels; verify against the transaction code. */
	int current_tlabel, tlabel_mask;
	struct list_head transaction_list;	/* outstanding transactions */
	struct timer_list flush_timer;
	unsigned long reset_jiffies;	/* NOTE(review): looks like the time of
					 * the last bus reset — confirm */

	unsigned long long guid;	/* 64-bit global unique ID of this node */
	unsigned max_receive;
	int link_speed;
	int config_rom_generation;

	spinlock_t lock; /* Take this lock when handling the lists in
			  * this struct. */
	struct fw_node *local_node;
	struct fw_node *root_node;
	struct fw_node *irm_node;	/* presumably the isochronous resource
					 * manager node — confirm */
	u8 color; /* must be u8 to match the definition in struct fw_node */
	int gap_count;
	bool beta_repeaters_present;

	int index;	/* card number — TODO confirm used for device naming */

	struct list_head link;	/* entry in the core's global card list */

	/* Work struct for BM duties. */
	struct delayed_work work;
	int bm_retries;
	int bm_generation;

	bool broadcast_channel_allocated;
	u32 broadcast_channel;
	/* Cached copy of the bus's topology map CSR (1 kB). */
	u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
};
133
/* Take an additional reference on @card; release it with fw_card_put(). */
static inline struct fw_card *fw_card_get(struct fw_card *card)
{
	kref_get(&card->kref);

	return card;
}
140
/* Destructor invoked by kref_put() when the last reference is dropped. */
void fw_card_release(struct kref *kref);

/* Drop a reference taken with fw_card_get(). */
static inline void fw_card_put(struct fw_card *card)
{
	kref_put(&card->kref, fw_card_release);
}
147
/*
 * Preallocated storage for a device's sysfs attributes.  @groups is the
 * pointer array handed to the driver core (presumably one group plus a
 * NULL terminator); @attrs holds the attribute pointers for @group
 * (presumably up to 11 plus a NULL terminator — confirm against users).
 */
struct fw_attribute_group {
	struct attribute_group *groups[2];
	struct attribute_group group;
	struct attribute *attrs[12];
};
153
/* Lifetime states of an fw_device; stored in fw_device.state (atomic_t). */
enum fw_device_state {
	FW_DEVICE_INITIALIZING,
	FW_DEVICE_RUNNING,
	FW_DEVICE_GONE,
	FW_DEVICE_SHUTDOWN,
};
160
161/*
162 * Note, fw_device.generation always has to be read before fw_device.node_id.
163 * Use SMP memory barriers to ensure this. Otherwise requests will be sent
164 * to an outdated node_id if the generation was updated in the meantime due
165 * to a bus reset.
166 *
167 * Likewise, fw-core will take care to update .node_id before .generation so
168 * that whenever fw_device.generation is current WRT the actual bus generation,
169 * fw_device.node_id is guaranteed to be current too.
170 *
171 * The same applies to fw_device.card->node_id vs. fw_device.generation.
172 *
173 * fw_device.config_rom and fw_device.config_rom_length may be accessed during
174 * the lifetime of any fw_unit belonging to the fw_device, before device_del()
175 * was called on the last fw_unit. Alternatively, they may be accessed while
176 * holding fw_device_rwsem.
177 */
/*
 * A remote (or the local) node on the bus.  Lifetime is managed through
 * the embedded struct device; see the ordering notes in the comment above
 * for .generation vs. .node_id and for config_rom access.
 */
struct fw_device {
	atomic_t state;		/* enum fw_device_state */
	struct fw_node *node;
	int node_id;		/* read .generation before .node_id, see above */
	int generation;
	unsigned max_speed;
	struct fw_card *card;	/* the card this node was discovered on */
	struct device device;	/* embedded driver-model device */

	/* Protects client_list — presumably the <linux/firewire-cdev.h>
	 * userspace clients; confirm against core-cdev. */
	struct mutex client_list_mutex;
	struct list_head client_list;

	/* Cached config ROM; access rules are described in the note above. */
	u32 *config_rom;
	size_t config_rom_length;
	int config_rom_retries;
	unsigned is_local:1;		/* set for the card's own node */
	unsigned cmc:1;			/* cycle master capable — TODO confirm */
	unsigned bc_implemented:2;	/* broadcast-channel register state —
					 * TODO confirm encoding */

	struct delayed_work work;
	struct fw_attribute_group attribute_group;
};
200
/* Convert the embedded struct device back to its containing fw_device. */
static inline struct fw_device *fw_device(struct device *dev)
{
	return container_of(dev, struct fw_device, device);
}
205
/* Nonzero once the device has reached its final FW_DEVICE_SHUTDOWN state. */
static inline int fw_device_is_shutdown(struct fw_device *device)
{
	return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
}
210
/* Take a driver-model reference on @device; drop it with fw_device_put(). */
static inline struct fw_device *fw_device_get(struct fw_device *device)
{
	get_device(&device->device);

	return device;
}
217
/* Drop a reference taken with fw_device_get(). */
static inline void fw_device_put(struct fw_device *device)
{
	put_device(&device->device);
}

/* Ask the low-level driver to allow physical DMA from this node. */
int fw_device_enable_phys_dma(struct fw_device *device);
224
/*
 * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
 */
struct fw_unit {
	struct device device;	/* child of the fw_device's device */
	u32 *directory;		/* this unit's config ROM unit directory */
	struct fw_attribute_group attribute_group;
};
233
/* Convert the embedded struct device back to its containing fw_unit. */
static inline struct fw_unit *fw_unit(struct device *dev)
{
	return container_of(dev, struct fw_unit, device);
}
238
/* Take a driver-model reference on @unit; drop it with fw_unit_put(). */
static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
{
	get_device(&unit->device);

	return unit;
}
245
/* Drop a reference taken with fw_unit_get(). */
static inline void fw_unit_put(struct fw_unit *unit)
{
	put_device(&unit->device);
}
250
/* (commit annotation "e5110d01 SR" — web-scrape artifact, not source) */
/* A unit's parent in the device tree is the fw_device it belongs to. */
static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
{
	return fw_device(unit->device.parent);
}
255
/* (commit annotation "77c9a5da SR" — web-scrape artifact, not source) */
struct ieee1394_device_id;

/* A driver for fw_unit devices; embeds the generic device_driver. */
struct fw_driver {
	struct device_driver driver;
	/* Called when the parent device sits through a bus reset. */
	void (*update)(struct fw_unit *unit);
	/* IDs matched against the unit's SPECIFIER_ID/VERSION etc. */
	const struct ieee1394_device_id *id_table;
};
264
struct fw_packet;
struct fw_request;

/* Completion callback for a single transmitted packet; see the comment in
 * struct fw_packet for the meaning of @status. */
typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
				     struct fw_card *card, int status);
/* Completion callback for a whole transaction: @rcode is the response
 * code, @data/@length the response payload. */
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
					  void *data, size_t length,
					  void *callback_data);
/*
 * Important note: The callback must guarantee that either fw_send_response()
 * or kfree() is called on the @request.
 */
typedef void (*fw_address_callback_t)(struct fw_card *card,
				      struct fw_request *request,
				      int tcode, int destination, int source,
				      int generation, int speed,
				      unsigned long long offset,
				      void *data, size_t length,
				      void *callback_data);
284
/* One packet queued for transmission by the low-level driver. */
struct fw_packet {
	int speed;
	int generation;		/* bus generation the packet is valid for */
	u32 header[4];		/* packet header quadlets */
	size_t header_length;
	void *payload;
	size_t payload_length;
	dma_addr_t payload_bus;	/* DMA mapping of @payload, if mapped */
	u32 timestamp;

	/*
	 * This callback is called when the packet transmission has
	 * completed; for successful transmission, the status code is
	 * the ack received from the destination, otherwise it's a
	 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
	 * The callback can be called from tasklet context and thus
	 * must never block.
	 */
	fw_packet_callback_t callback;
	int ack;
	struct list_head link;
	void *driver_data;
};
308
/* An outstanding request/response transaction; see fw_send_request(). */
struct fw_transaction {
	int node_id; /* The generation is implied; it is always the current. */
	int tlabel;		/* transaction label matching the response */
	int timestamp;
	struct list_head link;	/* entry in fw_card.transaction_list */

	struct fw_packet packet;	/* the request packet */

	/*
	 * The data passed to the callback is valid only during the
	 * callback.
	 */
	fw_transaction_callback_t callback;
	void *callback_data;
};
324
/*
 * Handler for inbound requests to a range of CSR address space; register
 * with fw_core_add_address_handler().
 */
struct fw_address_handler {
	u64 offset;		/* start of the handled range */
	size_t length;		/* length of the handled range in bytes */
	fw_address_callback_t address_callback;
	void *callback_data;
	struct list_head link;
};
332
/* Half-open address range [start, end) within the 48-bit CSR space. */
struct fw_address_region {
	u64 start;
	u64 end;
};
337
extern const struct fw_address_region fw_high_memory_region;

/* Register/unregister a handler for inbound requests within @region. */
int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region);
void fw_core_remove_address_handler(struct fw_address_handler *handler);
/* Send the response to an inbound @request (see fw_address_callback_t note). */
void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode);
/* Asynchronously send a request; @callback runs on completion. */
void fw_send_request(struct fw_card *card, struct fw_transaction *t,
		     int tcode, int destination_id, int generation, int speed,
		     unsigned long long offset, void *payload, size_t length,
		     fw_transaction_callback_t callback, void *callback_data);
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction);
/* Synchronous (blocking) variant of fw_send_request(); returns the rcode. */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length);
354
355#endif /* _LINUX_FIREWIRE_H */
/* (HTML page-generation footer from web scrape removed) */