/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * The NHI (native host interface) is the pci device that allows us to send and
 * receive frames from the thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dmi.h>

#include "nhi.h"
#include "nhi_regs.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")


static int ring_interrupt_index(struct tb_ring *ring)
{
        int bit = ring->hop;
        if (!ring->is_tx)
                bit += ring->nhi->hop_count;
        return bit;
}

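/*
 * Interrupt mask bits are laid out per ring: TX ring n uses bit n, RX
 * ring n uses bit hop_count + n. The status registers at
 * REG_RING_NOTIFY_BASE use the same layout, followed by a third group of
 * hop_count bits for RX overflow (see nhi_interrupt_work() below).
 */
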
/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
        int reg = REG_RING_INTERRUPT_BASE +
                  ring_interrupt_index(ring) / 32 * 4;
        int bit = ring_interrupt_index(ring) & 31;
        int mask = 1 << bit;
        u32 old, new;
        old = ioread32(ring->nhi->iobase + reg);
        if (active)
                new = old | mask;
        else
                new = old & ~mask;

        dev_info(&ring->nhi->pdev->dev,
                 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
                 active ? "enabling" : "disabling", reg, bit, old, new);

        if (new == old)
                dev_WARN(&ring->nhi->pdev->dev,
                         "interrupt for %s %d is already %s\n",
                         RING_TYPE(ring), ring->hop,
                         active ? "enabled" : "disabled");
        iowrite32(new, ring->nhi->iobase + reg);
}

/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
        int i = 0;
        /* disable interrupts */
        for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
                iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

        /* clear interrupt status bits */
        for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
                ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

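/*
 * RING_INTERRUPT_REG_COUNT and RING_NOTIFY_REG_COUNT come from nhi_regs.h;
 * presumably they are the number of 32 bit registers needed to cover
 * 2 * hop_count mask bits and 3 * hop_count status bits respectively.
 */
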
/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
        void __iomem *io = ring->nhi->iobase;
        io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
        io += ring->hop * 16;
        return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
        void __iomem *io = ring->nhi->iobase;
        io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
        io += ring->hop * 32;
        return io;
}

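/*
 * Each ring owns 16 bytes of descriptor registers and 32 bytes of options
 * registers. Judging by ring_start() and ring_write_descriptors(), the
 * descriptor block holds the 64 bit physical base address at offset 0, the
 * ring indices at offset 8/10 and the ring size at offset 12; the meaning
 * of the options registers is only partially known.
 */
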
static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
{
        iowrite16(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
        iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
        iowrite32(value, ring_desc_base(ring) + offset);
        iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
        iowrite32(value, ring_options_base(ring) + offset);
}

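/*
 * head and tail form a classic circular buffer: one slot is kept free so
 * that full (head + 1 == tail modulo size) can be told apart from empty
 * (head == tail). A ring of size n thus holds at most n - 1 frames.
 */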
static bool ring_full(struct tb_ring *ring)
{
        return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
        return ring->head == ring->tail;
}

/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock must be held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
        struct ring_frame *frame, *n;
        struct ring_desc *descriptor;
        list_for_each_entry_safe(frame, n, &ring->queue, list) {
                if (ring_full(ring))
                        break;
                list_move_tail(&frame->list, &ring->in_flight);
                descriptor = &ring->descriptors[ring->head];
                descriptor->phys = frame->buffer_phy;
                descriptor->time = 0;
                descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
                if (ring->is_tx) {
                        descriptor->length = frame->size;
                        descriptor->eof = frame->eof;
                        descriptor->sof = frame->sof;
                }
                ring->head = (ring->head + 1) % ring->size;
                ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
        }
}

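/*
 * Note that size/eof/sof are inputs for TX descriptors but outputs for RX
 * descriptors: the hardware fills them in and ring_work() copies them back
 * into the frame.
 */
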
/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
        struct tb_ring *ring = container_of(work, typeof(*ring), work);
        struct ring_frame *frame;
        bool canceled = false;
        LIST_HEAD(done);
        mutex_lock(&ring->lock);

        if (!ring->running) {
                /* Move all frames to done and mark them as canceled. */
                list_splice_tail_init(&ring->in_flight, &done);
                list_splice_tail_init(&ring->queue, &done);
                canceled = true;
                goto invoke_callback;
        }

        while (!ring_empty(ring)) {
                if (!(ring->descriptors[ring->tail].flags
                                & RING_DESC_COMPLETED))
                        break;
                frame = list_first_entry(&ring->in_flight, typeof(*frame),
                                         list);
                list_move_tail(&frame->list, &done);
                if (!ring->is_tx) {
                        frame->size = ring->descriptors[ring->tail].length;
                        frame->eof = ring->descriptors[ring->tail].eof;
                        frame->sof = ring->descriptors[ring->tail].sof;
                        frame->flags = ring->descriptors[ring->tail].flags;
                        if (frame->sof != 0)
                                dev_WARN(&ring->nhi->pdev->dev,
                                         "%s %d got unexpected SOF: %#x\n",
                                         RING_TYPE(ring), ring->hop,
                                         frame->sof);
                        /*
                         * known flags:
                         * raw not enabled, interrupt not set: 0x2=0010
                         * raw enabled: 0xa=1010
                         * raw not enabled: 0xb=1011
                         * partial frame (>MAX_FRAME_SIZE): 0xe=1110
                         */
                        if (frame->flags != 0xa)
                                dev_WARN(&ring->nhi->pdev->dev,
                                         "%s %d got unexpected flags: %#x\n",
                                         RING_TYPE(ring), ring->hop,
                                         frame->flags);
                }
                ring->tail = (ring->tail + 1) % ring->size;
        }
        ring_write_descriptors(ring);

invoke_callback:
        mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
        while (!list_empty(&done)) {
                frame = list_first_entry(&done, typeof(*frame), list);
                /*
                 * The callback may reenqueue or delete frame.
                 * Do not hold on to it.
                 */
                list_del_init(&frame->list);
                frame->callback(ring, frame, canceled);
        }
}

int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
        int ret = 0;
        mutex_lock(&ring->lock);
        if (ring->running) {
                list_add_tail(&frame->list, &ring->queue);
                ring_write_descriptors(ring);
        } else {
                ret = -ESHUTDOWN;
        }
        mutex_unlock(&ring->lock);
        return ret;
}

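/*
 * A minimal receive callback might look like this (a sketch; it assumes
 * the ring_rx() wrapper around __ring_enqueue() from nhi.h and a
 * hypothetical consume_frame() helper):
 *
 *      static void rx_callback(struct tb_ring *ring,
 *                              struct ring_frame *frame, bool canceled)
 *      {
 *              if (canceled)
 *                      return;                 (dropped by ring_stop())
 *              consume_frame(frame);           (hypothetical)
 *              ring_rx(ring, frame);           (re-post the buffer)
 *      }
 *
 * Callbacks run on the work queue with ring->lock released, so they may
 * re-enqueue or free the frame.
 */
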
static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
                                  bool transmit)
{
        struct tb_ring *ring = NULL;
        dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
                 transmit ? "TX" : "RX", hop, size);

        mutex_lock(&nhi->lock);
        if (hop >= nhi->hop_count) {
                dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
                goto err;
        }
        if (transmit && nhi->tx_rings[hop]) {
                dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
                goto err;
        } else if (!transmit && nhi->rx_rings[hop]) {
                dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
                goto err;
        }
        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto err;

        mutex_init(&ring->lock);
        INIT_LIST_HEAD(&ring->queue);
        INIT_LIST_HEAD(&ring->in_flight);
        INIT_WORK(&ring->work, ring_work);

        ring->nhi = nhi;
        ring->hop = hop;
        ring->is_tx = transmit;
        ring->size = size;
        ring->head = 0;
        ring->tail = 0;
        ring->running = false;
        ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
                        size * sizeof(*ring->descriptors),
                        &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
        if (!ring->descriptors)
                goto err;

        if (transmit)
                nhi->tx_rings[hop] = ring;
        else
                nhi->rx_rings[hop] = ring;
        mutex_unlock(&nhi->lock);
        return ring;

err:
        if (ring)
                mutex_destroy(&ring->lock);
        kfree(ring);
        mutex_unlock(&nhi->lock);
        return NULL;
}

struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size)
{
        return ring_alloc(nhi, hop, size, true);
}

struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size)
{
        return ring_alloc(nhi, hop, size, false);
}

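/*
 * The expected lifecycle of a ring is alloc -> start -> enqueue -> stop
 * -> free, e.g. (sketch, assuming the ring_tx() wrapper from nhi.h):
 *
 *      ring = ring_alloc_tx(nhi, hop, size);
 *      ring_start(ring);
 *      ... ring_tx(ring, frame) ...
 *      ring_stop(ring);
 *      ring_free(ring);
 *
 * ring_stop() cancels all frames still in flight; ring_free() may only be
 * called on a stopped ring.
 */
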
/**
 * ring_start() - enable a ring
 *
 * Must not be invoked in parallel with ring_stop().
 */
void ring_start(struct tb_ring *ring)
{
        mutex_lock(&ring->nhi->lock);
        mutex_lock(&ring->lock);
        if (ring->running) {
                dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
                goto err;
        }
        dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
                 RING_TYPE(ring), ring->hop);

        ring_iowrite64desc(ring, ring->descriptors_dma, 0);
        if (ring->is_tx) {
                ring_iowrite32desc(ring, ring->size, 12);
                ring_iowrite32options(ring, 0, 4); /* time related? */
                ring_iowrite32options(ring,
                                      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
        } else {
                ring_iowrite32desc(ring,
                                   (TB_FRAME_SIZE << 16) | ring->size, 12);
                ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
                ring_iowrite32options(ring,
                                      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
        }
        ring_interrupt_active(ring, true);
        ring->running = true;
err:
        mutex_unlock(&ring->lock);
        mutex_unlock(&ring->nhi->lock);
}

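/*
 * Lock ordering: ring_start() and ring_stop() take nhi->lock before
 * ring->lock; any path that needs both must follow the same order.
 */
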
/**
 * ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Once ring_stop() has been called,
 * further calls to ring_tx/ring_rx will return -ESHUTDOWN until the ring is
 * started again with ring_start().
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with the canceled argument set to true (on the callback thread). This
 * method returns only after all callback invocations have finished.
 */
void ring_stop(struct tb_ring *ring)
{
        mutex_lock(&ring->nhi->lock);
        mutex_lock(&ring->lock);
        dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
                 RING_TYPE(ring), ring->hop);
        if (!ring->running) {
                dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
                         RING_TYPE(ring), ring->hop);
                goto err;
        }
        ring_interrupt_active(ring, false);

        ring_iowrite32options(ring, 0, 0);
        ring_iowrite64desc(ring, 0, 0);
        ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
        ring_iowrite32desc(ring, 0, 12);
        ring->head = 0;
        ring->tail = 0;
        ring->running = false;

err:
        mutex_unlock(&ring->lock);
        mutex_unlock(&ring->nhi->lock);

        /*
         * schedule ring->work to invoke callbacks on all remaining frames.
         */
        schedule_work(&ring->work);
        flush_work(&ring->work);
}

/**
 * ring_free() - free ring
 *
 * When this method returns all invocations of frame->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void ring_free(struct tb_ring *ring)
{
        mutex_lock(&ring->nhi->lock);
        /*
         * Dissociate the ring from the NHI. This also ensures that
         * nhi_interrupt_work cannot reschedule ring->work.
         */
        if (ring->is_tx)
                ring->nhi->tx_rings[ring->hop] = NULL;
        else
                ring->nhi->rx_rings[ring->hop] = NULL;

        if (ring->running) {
                dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
                         RING_TYPE(ring), ring->hop);
        }

        dma_free_coherent(&ring->nhi->pdev->dev,
                          ring->size * sizeof(*ring->descriptors),
                          ring->descriptors, ring->descriptors_dma);

        ring->descriptors = NULL;
        ring->descriptors_dma = 0;

        dev_info(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
                 ring->hop);

        mutex_unlock(&ring->nhi->lock);
        /*
         * ring->work can no longer be scheduled (it is scheduled only by
         * nhi_interrupt_work and ring_stop). Wait for it to finish before
         * freeing the ring.
         */
        flush_work(&ring->work);
        mutex_destroy(&ring->lock);
        kfree(ring);
}

static void nhi_interrupt_work(struct work_struct *work)
{
        struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
        int value = 0; /* Suppress uninitialized usage warning. */
        int bit;
        int hop = -1;
        int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
        struct tb_ring *ring;

        mutex_lock(&nhi->lock);

        /*
         * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
         * (TX, RX, RX overflow). We iterate over the bits and read new
         * dwords as required. The registers are cleared on read.
         */
        for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
                if (bit % 32 == 0)
                        value = ioread32(nhi->iobase
                                         + REG_RING_NOTIFY_BASE
                                         + 4 * (bit / 32));
                if (++hop == nhi->hop_count) {
                        hop = 0;
                        type++;
                }
                if ((value & (1 << (bit % 32))) == 0)
                        continue;
                if (type == 2) {
                        dev_warn(&nhi->pdev->dev,
                                 "RX overflow for ring %d\n", hop);
                        continue;
                }
                if (type == 0)
                        ring = nhi->tx_rings[hop];
                else
                        ring = nhi->rx_rings[hop];
                if (ring == NULL) {
                        dev_warn(&nhi->pdev->dev,
                                 "got interrupt for inactive %s ring %d\n",
                                 type ? "RX" : "TX", hop);
                        continue;
                }
                /* we do not check ring->running, this is done in ring->work */
                schedule_work(&ring->work);
        }
        mutex_unlock(&nhi->lock);
}

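/*
 * The NHI signals all rings through a single MSI vector. The handler only
 * schedules interrupt_work: the status registers are cleared on read and
 * nhi->lock is a mutex, so the demultiplexing has to happen in process
 * context.
 */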
static irqreturn_t nhi_msi(int irq, void *data)
{
        struct tb_nhi *nhi = data;
        schedule_work(&nhi->interrupt_work);
        return IRQ_HANDLED;
}

static void nhi_shutdown(struct tb_nhi *nhi)
{
        int i;
        dev_info(&nhi->pdev->dev, "shutdown\n");

        for (i = 0; i < nhi->hop_count; i++) {
                if (nhi->tx_rings[i])
                        dev_WARN(&nhi->pdev->dev,
                                 "TX ring %d is still active\n", i);
                if (nhi->rx_rings[i])
                        dev_WARN(&nhi->pdev->dev,
                                 "RX ring %d is still active\n", i);
        }
        nhi_disable_interrupts(nhi);
        /*
         * We have to release the irq before calling flush_work. Otherwise an
         * already executing IRQ handler could call schedule_work again.
         */
        devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
        flush_work(&nhi->interrupt_work);
        mutex_destroy(&nhi->lock);
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct tb_nhi *nhi;
        int res;

        res = pcim_enable_device(pdev);
        if (res) {
                dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
                return res;
        }

        res = pci_enable_msi(pdev);
        if (res) {
                dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
                return res;
        }

        res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
        if (res) {
                dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
                return res;
        }

        nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
        if (!nhi)
                return -ENOMEM;

        nhi->pdev = pdev;
        /* cannot fail - table is allocated by pcim_iomap_regions */
        nhi->iobase = pcim_iomap_table(pdev)[0];
        nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
        if (nhi->hop_count != 12)
                dev_warn(&pdev->dev, "unexpected hop count: %d\n",
                         nhi->hop_count);
        INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

        /* tx_rings/rx_rings hold one ring pointer per hop */
        nhi->tx_rings = devm_kzalloc(&pdev->dev,
                                     nhi->hop_count * sizeof(*nhi->tx_rings),
                                     GFP_KERNEL);
        nhi->rx_rings = devm_kzalloc(&pdev->dev,
                                     nhi->hop_count * sizeof(*nhi->rx_rings),
                                     GFP_KERNEL);
        if (!nhi->tx_rings || !nhi->rx_rings)
                return -ENOMEM;

        nhi_disable_interrupts(nhi); /* In case someone left them on. */
        res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi,
                               IRQF_NO_SUSPEND, /* must work during _noirq */
                               "thunderbolt", nhi);
        if (res) {
                dev_err(&pdev->dev, "request_irq failed, aborting\n");
                return res;
        }

        mutex_init(&nhi->lock);

        pci_set_master(pdev);

        /* magic value - clock related? */
        iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);

        pci_set_drvdata(pdev, nhi);

        return 0;
}

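/*
 * Everything in nhi_probe() is allocated with devm_/pcim_ helpers and is
 * released automatically once the device goes away, so nhi_remove() only
 * has to quiesce the hardware.
 */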
static void nhi_remove(struct pci_dev *pdev)
{
        struct tb_nhi *nhi = pci_get_drvdata(pdev);
        nhi_shutdown(nhi);
}

struct pci_device_id nhi_ids[] = {
        /*
         * We have to specify class, the TB bridges use the same device and
         * vendor (sub)id.
         */
        {
                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL, .device = 0x1547,
                .subvendor = 0x2222, .subdevice = 0x1111,
        },
        {
                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
                .subvendor = 0x2222, .subdevice = 0x1111,
        },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
        .name = "thunderbolt",
        .id_table = nhi_ids,
        .probe = nhi_probe,
        .remove = nhi_remove,
};

static int __init nhi_init(void)
{
        if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
                return -ENOSYS;
        return pci_register_driver(&nhi_driver);
}

static void __exit nhi_unload(void)
{
        pci_unregister_driver(&nhi_driver);
}

module_init(nhi_init);
module_exit(nhi_unload);