drivers/thunderbolt/nhi.c

/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * The NHI (native host interface) is the PCI device that allows us to send and
 * receive frames from the thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dmi.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")


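/*
 * The per-ring interrupt enable bits are laid out as one bit per ring:
 * TX ring N uses bit N, RX ring N uses bit hop_count + N.
 */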
static int ring_interrupt_index(struct tb_ring *ring)
{
        int bit = ring->hop;
        if (!ring->is_tx)
                bit += ring->nhi->hop_count;
        return bit;
}

/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
        int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32;
        int bit = ring_interrupt_index(ring) & 31;
        int mask = 1 << bit;
        u32 old, new;
        old = ioread32(ring->nhi->iobase + reg);
        if (active)
                new = old | mask;
        else
                new = old & ~mask;

        dev_info(&ring->nhi->pdev->dev,
                 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
                 active ? "enabling" : "disabling", reg, bit, old, new);

        if (new == old)
                dev_WARN(&ring->nhi->pdev->dev,
                         "interrupt for %s %d is already %s\n",
                         RING_TYPE(ring), ring->hop,
                         active ? "enabled" : "disabled");
        iowrite32(new, ring->nhi->iobase + reg);
}

/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
        int i = 0;
        /* disable interrupts */
        for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
                iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

        /* clear interrupt status bits */
        for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
                ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

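/*
 * Each ring owns a 16 byte block of descriptor registers (ring base address,
 * producer/consumer indices and size) and a 32 byte block of options
 * registers; the two helpers below compute the per-ring base of each block.
 */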
static void __iomem *ring_desc_base(struct tb_ring *ring)
{
        void __iomem *io = ring->nhi->iobase;
        io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
        io += ring->hop * 16;
        return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
        void __iomem *io = ring->nhi->iobase;
        io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
        io += ring->hop * 32;
        return io;
}

static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
{
        iowrite16(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
        iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
        iowrite32(value, ring_desc_base(ring) + offset);
        iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
        iowrite32(value, ring_options_base(ring) + offset);
}

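/*
 * The ring deliberately leaves one descriptor slot unused: "full" is reached
 * at size - 1 entries, which keeps head == tail unambiguous as "empty".
 */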
static bool ring_full(struct tb_ring *ring)
{
        return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
        return ring->head == ring->tail;
}

/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
        struct ring_frame *frame, *n;
        struct ring_desc *descriptor;
        list_for_each_entry_safe(frame, n, &ring->queue, list) {
                if (ring_full(ring))
                        break;
                list_move_tail(&frame->list, &ring->in_flight);
                descriptor = &ring->descriptors[ring->head];
                descriptor->phys = frame->buffer_phy;
                descriptor->time = 0;
                descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
                if (ring->is_tx) {
                        descriptor->length = frame->size;
                        descriptor->eof = frame->eof;
                        descriptor->sof = frame->sof;
                }
                ring->head = (ring->head + 1) % ring->size;
                ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
        }
}

/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
        struct tb_ring *ring = container_of(work, typeof(*ring), work);
        struct ring_frame *frame;
        bool canceled = false;
        LIST_HEAD(done);
        mutex_lock(&ring->lock);

        if (!ring->running) {
                /* Move all frames to done and mark them as canceled. */
                list_splice_tail_init(&ring->in_flight, &done);
                list_splice_tail_init(&ring->queue, &done);
                canceled = true;
                goto invoke_callback;
        }

        while (!ring_empty(ring)) {
                if (!(ring->descriptors[ring->tail].flags
                                & RING_DESC_COMPLETED))
                        break;
                frame = list_first_entry(&ring->in_flight, typeof(*frame),
                                         list);
                list_move_tail(&frame->list, &done);
                if (!ring->is_tx) {
                        frame->size = ring->descriptors[ring->tail].length;
                        frame->eof = ring->descriptors[ring->tail].eof;
                        frame->sof = ring->descriptors[ring->tail].sof;
                        frame->flags = ring->descriptors[ring->tail].flags;
                        if (frame->sof != 0)
                                dev_WARN(&ring->nhi->pdev->dev,
                                         "%s %d got unexpected SOF: %#x\n",
                                         RING_TYPE(ring), ring->hop,
                                         frame->sof);
                        /*
                         * known flags:
                         * raw not enabled, interrupt not set: 0x2=0010
                         * raw enabled: 0xa=1010
                         * raw not enabled: 0xb=1011
                         * partial frame (>MAX_FRAME_SIZE): 0xe=1110
                         */
                        if (frame->flags != 0xa)
                                dev_WARN(&ring->nhi->pdev->dev,
                                         "%s %d got unexpected flags: %#x\n",
                                         RING_TYPE(ring), ring->hop,
                                         frame->flags);
                }
                ring->tail = (ring->tail + 1) % ring->size;
        }
        ring_write_descriptors(ring);

invoke_callback:
        mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
        while (!list_empty(&done)) {
                frame = list_first_entry(&done, typeof(*frame), list);
                /*
                 * The callback may reenqueue or delete frame.
                 * Do not hold on to it.
                 */
                list_del_init(&frame->list);
                frame->callback(ring, frame, canceled);
        }
}

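/*
 * __ring_enqueue() - queue a frame and post it to the hardware
 *
 * Returns -ESHUTDOWN if the ring is not running; in that case the frame is
 * not queued and its callback will not be invoked.
 */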
int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
        int ret = 0;
        mutex_lock(&ring->lock);
        if (ring->running) {
                list_add_tail(&frame->list, &ring->queue);
                ring_write_descriptors(ring);
        } else {
                ret = -ESHUTDOWN;
        }
        mutex_unlock(&ring->lock);
        return ret;
}

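/*
 * ring_alloc() - common allocation path for TX and RX rings
 *
 * Registers the new ring in nhi->tx_rings/nhi->rx_rings so that
 * nhi_interrupt_work() can find it and schedule ring->work.
 */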
static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
                                  bool transmit)
{
        struct tb_ring *ring = NULL;
        dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
                 transmit ? "TX" : "RX", hop, size);

        mutex_lock(&nhi->lock);
        if (hop >= nhi->hop_count) {
                dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
                goto err;
        }
        if (transmit && nhi->tx_rings[hop]) {
                dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
                goto err;
        } else if (!transmit && nhi->rx_rings[hop]) {
                dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
                goto err;
        }
        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto err;

        mutex_init(&ring->lock);
        INIT_LIST_HEAD(&ring->queue);
        INIT_LIST_HEAD(&ring->in_flight);
        INIT_WORK(&ring->work, ring_work);

        ring->nhi = nhi;
        ring->hop = hop;
        ring->is_tx = transmit;
        ring->size = size;
        ring->head = 0;
        ring->tail = 0;
        ring->running = false;
        ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
                        size * sizeof(*ring->descriptors),
                        &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
        if (!ring->descriptors)
                goto err;

        if (transmit)
                nhi->tx_rings[hop] = ring;
        else
                nhi->rx_rings[hop] = ring;
        mutex_unlock(&nhi->lock);
        return ring;

err:
        if (ring)
                mutex_destroy(&ring->lock);
        kfree(ring);
        mutex_unlock(&nhi->lock);
        return NULL;
}

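/*
 * ring_alloc_tx()/ring_alloc_rx() - allocate a transmit or receive ring for
 * hop @hop with @size descriptors. Returns NULL on failure.
 */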
struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size)
{
        return ring_alloc(nhi, hop, size, true);
}

struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size)
{
        return ring_alloc(nhi, hop, size, false);
}

/**
 * ring_start() - enable a ring
 *
 * Must not be invoked in parallel with ring_stop().
 */
void ring_start(struct tb_ring *ring)
{
        mutex_lock(&ring->nhi->lock);
        mutex_lock(&ring->lock);
        if (ring->running) {
                dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
                goto err;
        }
        dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
                 RING_TYPE(ring), ring->hop);

        ring_iowrite64desc(ring, ring->descriptors_dma, 0);
        if (ring->is_tx) {
                ring_iowrite32desc(ring, ring->size, 12);
                ring_iowrite32options(ring, 0, 4); /* time related? */
                ring_iowrite32options(ring,
                                      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
        } else {
                ring_iowrite32desc(ring,
                                   (TB_FRAME_SIZE << 16) | ring->size, 12);
                ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
                ring_iowrite32options(ring,
                                      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
        }
        ring_interrupt_active(ring, true);
        ring->running = true;
err:
        mutex_unlock(&ring->lock);
        mutex_unlock(&ring->nhi->lock);
}


/**
 * ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to ring_tx/ring_rx will
 * return -ESHUTDOWN until the ring is started again with ring_start().
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void ring_stop(struct tb_ring *ring)
{
        mutex_lock(&ring->nhi->lock);
        mutex_lock(&ring->lock);
        dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
                 RING_TYPE(ring), ring->hop);
        if (!ring->running) {
                dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
                         RING_TYPE(ring), ring->hop);
                goto err;
        }
        ring_interrupt_active(ring, false);

        ring_iowrite32options(ring, 0, 0);
        ring_iowrite64desc(ring, 0, 0);
        ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
        ring_iowrite32desc(ring, 0, 12);
        ring->head = 0;
        ring->tail = 0;
        ring->running = false;

err:
        mutex_unlock(&ring->lock);
        mutex_unlock(&ring->nhi->lock);

        /*
         * schedule ring->work to invoke callbacks on all remaining frames.
         */
        schedule_work(&ring->work);
        flush_work(&ring->work);
}

/*
 * ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void ring_free(struct tb_ring *ring)
{
        mutex_lock(&ring->nhi->lock);
        /*
         * Dissociate the ring from the NHI. This also ensures that
         * nhi_interrupt_work cannot reschedule ring->work.
         */
        if (ring->is_tx)
                ring->nhi->tx_rings[ring->hop] = NULL;
        else
                ring->nhi->rx_rings[ring->hop] = NULL;

        if (ring->running) {
                dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
                         RING_TYPE(ring), ring->hop);
        }

        dma_free_coherent(&ring->nhi->pdev->dev,
                          ring->size * sizeof(*ring->descriptors),
                          ring->descriptors, ring->descriptors_dma);

        ring->descriptors = NULL;
        ring->descriptors_dma = 0;


        dev_info(&ring->nhi->pdev->dev,
                 "freeing %s %d\n",
                 RING_TYPE(ring),
                 ring->hop);

        mutex_unlock(&ring->nhi->lock);
        /*
         * ring->work can no longer be scheduled (it is scheduled only by
         * nhi_interrupt_work and ring_stop). Wait for it to finish before
         * freeing the ring.
         */
        flush_work(&ring->work);
        mutex_destroy(&ring->lock);
        kfree(ring);
}

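/*
 * nhi_interrupt_work() - demultiplex the shared MSI
 *
 * Runs from the work queue scheduled by nhi_msi(). Reads the notification
 * registers and kicks off ring->work for every ring that has signalled.
 */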
static void nhi_interrupt_work(struct work_struct *work)
{
        struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
        int value = 0; /* Suppress uninitialized usage warning. */
        int bit;
        int hop = -1;
        int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
        struct tb_ring *ring;

        mutex_lock(&nhi->lock);

        /*
         * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
         * (TX, RX, RX overflow). We iterate over the bits and read new
         * dwords as required. The registers are cleared on read.
         */
        for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
                if (bit % 32 == 0)
                        value = ioread32(nhi->iobase
                                         + REG_RING_NOTIFY_BASE
                                         + 4 * (bit / 32));
                if (++hop == nhi->hop_count) {
                        hop = 0;
                        type++;
                }
                if ((value & (1 << (bit % 32))) == 0)
                        continue;
                if (type == 2) {
                        dev_warn(&nhi->pdev->dev,
                                 "RX overflow for ring %d\n",
                                 hop);
                        continue;
                }
                if (type == 0)
                        ring = nhi->tx_rings[hop];
                else
                        ring = nhi->rx_rings[hop];
                if (ring == NULL) {
                        dev_warn(&nhi->pdev->dev,
                                 "got interrupt for inactive %s ring %d\n",
                                 type ? "RX" : "TX",
                                 hop);
                        continue;
                }
                /* we do not check ring->running, this is done in ring->work */
                schedule_work(&ring->work);
        }
        mutex_unlock(&nhi->lock);
}

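/*
 * nhi_msi() - hard interrupt handler
 *
 * Ring state is protected by mutexes, which cannot be taken here, so all
 * real work is deferred to nhi->interrupt_work.
 */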
static irqreturn_t nhi_msi(int irq, void *data)
{
        struct tb_nhi *nhi = data;
        schedule_work(&nhi->interrupt_work);
        return IRQ_HANDLED;
}

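/*
 * nhi_shutdown() - disable interrupts and release the MSI handler
 *
 * Called from nhi_remove() and from the nhi_probe() error path. Warns if any
 * ring is still registered at this point.
 */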
static void nhi_shutdown(struct tb_nhi *nhi)
{
        int i;
        dev_info(&nhi->pdev->dev, "shutdown\n");

        for (i = 0; i < nhi->hop_count; i++) {
                if (nhi->tx_rings[i])
                        dev_WARN(&nhi->pdev->dev,
                                 "TX ring %d is still active\n", i);
                if (nhi->rx_rings[i])
                        dev_WARN(&nhi->pdev->dev,
                                 "RX ring %d is still active\n", i);
        }
        nhi_disable_interrupts(nhi);
        /*
         * We have to release the irq before calling flush_work. Otherwise an
         * already executing IRQ handler could call schedule_work again.
         */
        devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
        flush_work(&nhi->interrupt_work);
        mutex_destroy(&nhi->lock);
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct tb_nhi *nhi;
        struct tb *tb;
        int res;

        res = pcim_enable_device(pdev);
        if (res) {
                dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
                return res;
        }

        res = pci_enable_msi(pdev);
        if (res) {
                dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
                return res;
        }

        res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
        if (res) {
                dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
                return res;
        }

        nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
        if (!nhi)
                return -ENOMEM;

        nhi->pdev = pdev;
        /* cannot fail - table is allocated by pcim_iomap_regions */
        nhi->iobase = pcim_iomap_table(pdev)[0];
        nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
        if (nhi->hop_count != 12)
                dev_warn(&pdev->dev, "unexpected hop count: %d\n",
                         nhi->hop_count);
        INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

        nhi->tx_rings = devm_kzalloc(&pdev->dev,
                                     nhi->hop_count * sizeof(*nhi->tx_rings),
                                     GFP_KERNEL);
        nhi->rx_rings = devm_kzalloc(&pdev->dev,
                                     nhi->hop_count * sizeof(*nhi->rx_rings),
                                     GFP_KERNEL);
        if (!nhi->tx_rings || !nhi->rx_rings)
                return -ENOMEM;

        nhi_disable_interrupts(nhi); /* In case someone left them on. */
        res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi,
                               IRQF_NO_SUSPEND, /* must work during _noirq */
                               "thunderbolt", nhi);
        if (res) {
                dev_err(&pdev->dev, "request_irq failed, aborting\n");
                return res;
        }

        mutex_init(&nhi->lock);

        pci_set_master(pdev);

        /* magic value - clock related? */
        iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);

        dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
        tb = thunderbolt_alloc_and_start(nhi);
        if (!tb) {
                /*
                 * At this point the RX/TX rings might already have been
                 * activated. Do a proper shutdown.
                 */
                nhi_shutdown(nhi);
                return -EIO;
        }
        pci_set_drvdata(pdev, tb);

        return 0;
}

static void nhi_remove(struct pci_dev *pdev)
{
        struct tb *tb = pci_get_drvdata(pdev);
        struct tb_nhi *nhi = tb->nhi;
        thunderbolt_shutdown_and_free(tb);
        nhi_shutdown(nhi);
}

static struct pci_device_id nhi_ids[] = {
        /*
         * We have to specify class, as the TB bridges use the same device and
         * vendor (sub)id.
         */
        {
                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL, .device = 0x1547,
                .subvendor = 0x2222, .subdevice = 0x1111,
        },
        {
                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
                .subvendor = 0x2222, .subdevice = 0x1111,
        },
        { 0,}
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
        .name = "thunderbolt",
        .id_table = nhi_ids,
        .probe = nhi_probe,
        .remove = nhi_remove,
};

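/*
 * The driver is only registered on Apple hardware: the DMI check below bails
 * out on all other board vendors.
 */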
static int __init nhi_init(void)
{
        if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
                return -ENOSYS;
        return pci_register_driver(&nhi_driver);
}

static void __exit nhi_unload(void)
{
        pci_unregister_driver(&nhi_driver);
}

module_init(nhi_init);
module_exit(nhi_unload);