ieee1394: ohci1394: unroll a macro with return
drivers/ieee1394/ohci1394.c
/*
 * ohci1394.c - driver for OHCI 1394 boards
 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
 *                        Gord Peters <GordPeters@smarttech.com>
 *              2001      Ben Collins <bcollins@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Things known to be working:
 * . Async Request Transmit
 * . Async Response Receive
 * . Async Request Receive
 * . Async Response Transmit
 * . Iso Receive
 * . DMA mmap for iso receive
 * . Config ROM generation
 *
 * Things implemented, but still in test phase:
 * . Iso Transmit
 * . Async Stream Packets Transmit (Receive done via Iso interface)
 *
 * Things not implemented:
 * . DMA error recovery
 *
 * Known bugs:
 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
 */

/*
 * Acknowledgments:
 *
 * Adam J Richter <adam@yggdrasil.com>
 *  . Use of pci_class to find device
 *
 * Emilie Chung <emilie.chung@axis.com>
 *  . Tip on Async Request Filter
 *
 * Pascal Drolet <pascal.drolet@informission.ca>
 *  . Various tips for optimization and functionalities
 *
 * Robert Ficklin <rficklin@westengineering.com>
 *  . Loop in irq_handler
 *
 * James Goodwin <jamesg@Filanet.com>
 *  . Various tips on initialization, self-id reception, etc.
 *
 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
 *  . Apple PowerBook detection
 *
 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
 *  . Reset the board properly before leaving + misc cleanups
 *
 * Leon van Stuivenberg <leonvs@iae.nl>
 *  . Bug fixes
 *
 * Ben Collins <bcollins@debian.org>
 *  . Working big-endian support
 *  . Updated to 2.4.x module scheme (PCI as well)
 *  . Config ROM generation
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *  . Reworked code for initiating bus resets
 *    (long, short, with or without hold-off)
 *
 * Nandu Santhi <contactnandu@users.sourceforge.net>
 *  . Added support for nVidia nForce2 onboard Firewire chipset
 *
 */
84

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "dma.h"
#include "iso.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ohci1394.h"

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

#ifdef DBGMSG
#undef DBGMSG
#endif

#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...) do {} while (0)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)

/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0444);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");

static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);

static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);

static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int context_base);

static void ohci1394_pci_remove(struct pci_dev *pdev);

#ifndef __LITTLE_ENDIAN
static const size_t hdr_sizes[] = {
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* reserved */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
	/* rest is reserved or link-internal */
};

static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
{
	size_t size;

	if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
		return;

	size = hdr_sizes[tcode];
	while (size--)
		data[size] = le32_to_cpu(data[size]);
}
#else
#define header_le32_to_cpu(w,x) do {} while (0)
#endif /* !LITTLE_ENDIAN */
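
/*
 * OHCI presents packet headers to/from host memory as little-endian
 * quadlets, so on big-endian CPUs each header quadlet has to be swapped
 * in place.  hdr_sizes[] gives the header length in quadlets for each
 * transaction code; header_le32_to_cpu() swaps exactly that many
 * quadlets and leaves reserved tcodes alone.
 */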

/***********************************
 * IEEE-1394 functionality section *
 ***********************************/

static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return (r & 0x00ff0000) >> 16;
}
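
/*
 * PHY registers are reached through the PhyControl register: get_phy_reg()
 * above writes the register address together with the rdReg bit
 * (0x00008000), polls for up to OHCI_LOOP_COUNT ms for rdDone (0x80000000),
 * and takes the data from bits 16-23 of the result; set_phy_reg() below
 * works the same way with the wrReg bit (0x00004000).
 */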

static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
	int i;
	unsigned long flags;
	u32 r = 0;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		r = reg_read(ohci, OHCI1394_PhyControl);
		if (!(r & 0x00004000))
			break;

		mdelay(1);
	}

	if (i == OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x00004000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
}

/* OR the given bits into the current value of a PHY register */
static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
{
	u8 old;

	old = get_phy_reg (ohci, addr);
	old |= data;
	set_phy_reg (ohci, addr, old);
}

static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
			  int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count = reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response. */
		if (ohci->self_id_errors < OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

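	/* The buffer holds pairs of quadlets: each self-ID packet quadlet
	 * is followed by its bit-inverted copy, which is checked below
	 * before the packet is passed on to the core. */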
	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");
}

static void ohci_soft_reset(struct ti_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}
	DBGMSG ("Soft reset finished");
}

/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i = 0; i < d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
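
/*
 * The program built above is a ring of INPUT_MORE descriptors, one per
 * receive buffer, each branching to the next with Z=1.  The final
 * descriptor points back to the first but with Z=0, so descriptor
 * fetching should stall there until software re-links completed
 * buffers.
 */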

/* Initialize the dma transmit context */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
	d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
}

/* Count the number of available iso contexts */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
{
	int i, ctx = 0;
	u32 tmp;

	reg_write(ohci, reg, 0xffffffff);
	tmp = reg_read(ohci, reg);

	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);

	/* Count the number of contexts */
	for (i = 0; i < 32; i++) {
		if (tmp & 1)
			ctx++;
		tmp >>= 1;
	}
	return ctx;
}
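
/*
 * Probing works by writing all ones to the context interrupt mask-set
 * register and reading it back: the controller leaves a bit set for
 * every context it actually implements, so counting the set bits gives
 * the number of available contexts.
 */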

/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound,
		  OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleTooLong |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d] "
	      "MMIO=[%llx-%llx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
	      (unsigned long long)pci_resource_start(ohci->dev, 0),
	      (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to set max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, " EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, " EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}

/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
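		/*
		 * The timestamp below is 3 bits of seconds and 13 bits of
		 * cycle number: the current seconds field plus one (modulo
		 * 8) combined with the current cycle, i.e. expiry roughly
		 * one second from now.
		 */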
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __func__,
				      packet->data, packet->data_size);
			}
#endif
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
			pci_map_single(ohci->dev, packet->data,
				       packet->data_size, PCI_DMA_TODEVICE));

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}

/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;
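		/* z is the Z count for the first descriptor block, i.e. the
		 * number of 16-byte descriptor units the controller fetches:
		 * insert_packet() builds 2 units for an immediate header-only
		 * program and 3 when a payload descriptor follows (matching
		 * its | 0x2 / | 0x3 branch codes). */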

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d", d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d", d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}
}

/* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

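	/* Route the packet: PHY packets and requests go to the AT request
	 * context; tcodes with bit 1 set are responses (TCODE_ISO_DATA also
	 * has that bit set, hence the extra test) and use the AT response
	 * context. */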
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock, flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock, flags);

	return 0;
}

static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0, phy_reg;

	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}

/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
  buffer is split into "blocks" (regions described by one DMA
  descriptor). Each block must be one page or less in size, and
  must not cross a page boundary.

  There is one little wrinkle with buffer-fill mode: a packet that
  starts in the final block may wrap around into the first block. But
  the user API expects all packets to be contiguous. Our solution is
  to keep the very last page of the DMA buffer in reserve - if a
  packet spans the gap, we copy its tail into this page.
*/
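
/*
 * For example, assuming 4 KiB pages: a 64 KiB buffer yields 15 one-page
 * blocks, with the 16th page held back as the guard page that
 * ohci_iso_recv_bufferfill_parse() copies a wrapping packet's tail into.
 */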

struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};

static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);

static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
						((recv->nblocks+1)/iso->buf_packets);
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		     recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		    recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				  sizeof(struct dma_cmd) * recv->nblocks,
				  recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
						       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}

static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}

static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}

/* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride;

		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}

/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int reg, i;

	if (channel < 32) {
		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
		i = channel;
	} else {
		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
		i = channel - 32;
	}

	reg_write(recv->ohci, reg, (1 << i));

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
		} else {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
		}
	}

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}

static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i, prev_i;
	struct dma_cmd *next, *prev;

	/* ignore out-of-range requests */
	if ((block < 0) || (block >= recv->nblocks))
		return;

	next_i = block;
	prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	next = &recv->block[next_i];
	prev = &recv->block[prev_i];

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}

static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
{
	/* release the memory where the packet was */
	recv->released_bytes += info->total_len;

	/* have we released enough memory for one block? */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}

static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		ohci_iso_recv_bufferfill_release(recv, info);
	} else {
		ohci_iso_recv_release_block(recv, info - iso->infos);
	}
}

/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}

static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int loop;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over all blocks */
	for (loop = 0; loop < recv->nblocks; loop++) {

		/* check block_dma to see if it's done */
		struct dma_cmd *im = &recv->block[recv->block_dma];

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(im->status) >> 16;

		/* rescount is the number of bytes *remaining to be written* in the block */
		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* nothing has happened to this block yet */
			break;
		}

		if (event != 0x11) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		if (rescount != 0) {
			/* the card is still writing to this block;
			   we can't touch it until it's done */
			break;
		}

		/* OK, the block is finished... */

		/* sync our view of the block */
		dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);

		/* reset the DMA descriptor */
		im->status = recv->buf_stride;

		/* advance block_dma */
		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;

		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
			atomic_inc(&iso->overflows);
			DBGMSG("ISO reception overflow - "
			       "ran out of DMA blocks");
		}
	}

	/* parse any packets that have arrived */
	ohci_iso_recv_bufferfill_parse(iso, recv);
}

static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int count;
	int wake = 0;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over the entire buffer */
	for (count = 0; count < recv->nblocks; count++) {
		u32 packet_len = 0;

		/* pointer to the DMA descriptor */
		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(il->status) >> 16;
		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* this packet hasn't come in yet; we are done for now */
			goto out;
		}

		if (event == 0x11) {
			/* packet received successfully! */

			/* rescount is the number of bytes *remaining* in the packet buffer,
			   after the packet was written */
			packet_len = recv->buf_stride - rescount;

		} else if (event == 0x02) {
			PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
		} else if (event) {
			PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		/* sync our view of the buffer */
		dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);

		/* record the per-packet info */
		{
			/* iso header is 8 bytes ahead of the data payload */
			unsigned char *hdr;

			unsigned int offset;
			unsigned short cycle;
			unsigned char channel, tag, sy;

			offset = iso->pkt_dma * recv->buf_stride;
			hdr = iso->data_buf.kvirt + offset;

			/* skip iso header */
			offset += 8;
			packet_len -= 8;

			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
			channel = hdr[5] & 0x3F;
			tag = hdr[5] >> 6;
			sy = hdr[4] & 0xF;

			hpsb_iso_packet_received(iso, offset, packet_len,
						 recv->buf_stride, cycle, channel, tag, sy);
		}

		/* reset the DMA descriptor */
		il->status = recv->buf_stride;

		wake = 1;
		recv->block_dma = iso->pkt_dma;
	}

out:
	if (wake)
		hpsb_iso_wake(iso);
}

static void ohci_iso_recv_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->dma_mode == BUFFER_FILL_MODE)
		ohci_iso_recv_bufferfill_task(iso, recv);
	else
		ohci_iso_recv_packetperbuf_task(iso, recv);
}

/***********************************
 * rawiso ISO transmission         *
 ***********************************/

struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;
	struct ohci1394_iso_tasklet task;
	int task_active;

	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};

/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;
	u8 iso_hdr[8];
	u32 unused[2];
	struct dma_cmd output_last;
};
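
/*
 * Each iso_xmit_cmd is one contiguous descriptor block: the
 * OUTPUT_MORE_IMMEDIATE descriptor carries the 8-byte IT packet header
 * as immediate data (iso_hdr plus the unused words pad the immediate
 * field to 16 bytes), and output_last then points at the payload in the
 * user's DMA buffer.
 */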

static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);

static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}

static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		/* XXX the DMA context will lock up if you try to send too much data! */
		PRINT(KERN_ERR,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}

static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;
}
1824
1825static void ohci_iso_xmit_task(unsigned long data)
1826{
1827 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1828 struct ohci_iso_xmit *xmit = iso->hostdata;
1829 struct ti_ohci *ohci = xmit->ohci;
1830 int wake = 0;
1831 int count;
1832
1833 /* check the whole buffer if necessary, starting at pkt_dma */
1834 for (count = 0; count < iso->buf_packets; count++) {
1835 int cycle;
1836
1837 /* DMA descriptor */
1838 struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
1839
1840 /* check for new writes to xferStatus */
1841 u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
1842 u8 event = xferstatus & 0x1F;
1843
1844 if (!event) {
1845 /* packet hasn't been sent yet; we are done for now */
1846 break;
1847 }
1848
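	/* xferStatus is filled in by the controller on completion; its
	 * low five bits carry the OHCI event code. 0x11 should be
	 * evt_ack_complete (going by the OHCI spec), i.e. a clean send;
	 * anything else is reported as an error below. */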
1849 if (event != 0x11)
1850 PRINT(KERN_ERR,
1851 "IT DMA error - OHCI error code 0x%02x\n", event);
1852
1853 /* at least one packet went out, so wake up the writer */
1854 wake = 1;
1855
1856 /* parse cycle */
1857 cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
1858
1859 /* tell the subsystem the packet has gone out */
1860 hpsb_iso_packet_sent(iso, cycle, event != 0x11);
1861
1862 /* reset the DMA descriptor for next time */
1863 cmd->output_last.status = 0;
1864 }
1865
1866 if (wake)
1867 hpsb_iso_wake(iso);
1868}
1869
1870static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1871{
1872 struct ohci_iso_xmit *xmit = iso->hostdata;
1873 struct ti_ohci *ohci = xmit->ohci;
1874
1875 int next_i, prev_i;
1876 struct iso_xmit_cmd *next, *prev;
1877
1878 unsigned int offset;
1879 unsigned short len;
1880 unsigned char tag, sy;
1881
1882 /* check that the packet doesn't cross a page boundary
1883 (we could allow this if we added OUTPUT_MORE descriptor support) */
1884 if (cross_bound(info->offset, info->len)) {
1885 PRINT(KERN_ERR,
1886 "rawiso xmit: packet %u crosses a page boundary",
1887 iso->first_packet);
1888 return -EINVAL;
1889 }
1890
1891 offset = info->offset;
1892 len = info->len;
1893 tag = info->tag;
1894 sy = info->sy;
1895
1896 /* sync up the card's view of the buffer */
1897 dma_region_sync_for_device(&iso->data_buf, offset, len);
1898
1899 /* append first_packet to the DMA chain */
1900 /* by linking the previous descriptor to it */
1901 /* (next will become the new end of the DMA chain) */
1902
1903 next_i = iso->first_packet;
1904 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
1905
1906 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
1907 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
1908
1909 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
1910 memset(next, 0, sizeof(struct iso_xmit_cmd));
1911 next->output_more_immediate.control = cpu_to_le32(0x02000008);
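	/* Decoding the magic constant, as I read the descriptor control
	 * word: cmd (bits 31-28) = 0 -> OUTPUT_MORE, key (bits 26-24) = 2
	 * -> immediate data, reqCount (bits 15-0) = 8 -> eight bytes of
	 * immediate data, namely the two-quadlet IT header filled in below. */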
1912
1913 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
1914
1915 /* tcode = 0xA, and sy */
1916 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
1917
1918 /* tag and channel number */
1919 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
1920
1921 /* transmission speed */
1922 next->iso_hdr[2] = iso->speed & 0x7;
1923
1924 /* payload size */
1925 next->iso_hdr[6] = len & 0xFF;
1926 next->iso_hdr[7] = len >> 8;
1927
1928 /* set up the OUTPUT_LAST */
1929 next->output_last.control = cpu_to_le32(1 << 28);
1930 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
1931 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
1932 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
1933 next->output_last.control |= cpu_to_le32(len);
1934
1935 /* payload bus address */
1936 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
1937
1938 /* leave branchAddress at zero for now */
1939
1940 /* re-write the previous DMA descriptor to chain to this one */
1941
1942 /* set prev branch address to point to next (Z=3) */
1943 prev->output_last.branchAddress = cpu_to_le32(
1944 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
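	/* The low four bits of branchAddress hold Z, the number of 16-byte
	 * descriptor units at the branch target; Z=3 matches the iso_xmit_cmd
	 * block built above, while Z=0 marks the end of the chain. */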
1945
1946 /* disable interrupt, unless required by the IRQ interval */
1947 if (prev_i % iso->irq_interval) {
1948 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
1949 } else {
1950 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
1951 }
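	/* Net effect: only every irq_interval-th packet raises an IT
	 * interrupt. The xmit tasklet reaps every completed descriptor it
	 * finds, so the skipped interrupts do not lose any completions. */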
1952
1953 wmb();
1954
1955 /* wake DMA in case it is sleeping */
1956 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
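	/* Bit 12 is the ContextControl wake bit: if the context already ran
	 * off the end of the chain (it fetched the old Z=0 branch before we
	 * patched it), this tells it to re-fetch the descriptor and go on. */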
1957
1958 /* issue a dummy read of the cycle timer to force all PCI
1959 writes to be posted immediately */
1960 mb();
1961 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
1962
1963 return 0;
1964}
1965
1966static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
1967{
1968 struct ohci_iso_xmit *xmit = iso->hostdata;
1969 struct ti_ohci *ohci = xmit->ohci;
1970
1971 /* clear out the control register */
1972 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
1973 wmb();
1974
1975 /* address and length of first descriptor block (Z=3) */
1976 reg_write(xmit->ohci, xmit->CommandPtr,
1977 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
1978
1979 /* cycle match */
1980 if (cycle != -1) {
1981 u32 start = cycle & 0x1FFF;
1982
1983 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1984 just snarf them from the current time */
1985 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1986
1987 /* advance one second to give some extra time for DMA to start */
1988 seconds += 1;
1989
1990 start |= (seconds & 3) << 13;
1991
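		/* Resulting cycleMatch value: bits 12-0 are the cycle number,
		 * bits 14-13 the low two bits of cycleSeconds. It is written to
		 * ContextControl bits 30-16, with bit 31 enabling the match. */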
1992 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
1993 }
1994
1995 /* enable interrupts */
1996 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
1997
1998 /* run */
1999 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2000 mb();
2001
2002 /* wait 100 usec to give the card time to go active */
2003 udelay(100);
2004
2005 /* check the RUN bit */
2006 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2007 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2008 reg_read(xmit->ohci, xmit->ContextControlSet));
2009 return -1;
2010 }
2011
2012 return 0;
2013}
2014
2015static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2016{
2017
 2018 switch (cmd) {
2019 case XMIT_INIT:
2020 return ohci_iso_xmit_init(iso);
2021 case XMIT_START:
2022 return ohci_iso_xmit_start(iso, arg);
2023 case XMIT_STOP:
2024 ohci_iso_xmit_stop(iso);
2025 return 0;
2026 case XMIT_QUEUE:
2027 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2028 case XMIT_SHUTDOWN:
2029 ohci_iso_xmit_shutdown(iso);
2030 return 0;
2031
2032 case RECV_INIT:
2033 return ohci_iso_recv_init(iso);
2034 case RECV_START: {
2035 int *args = (int*) arg;
2036 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2037 }
2038 case RECV_STOP:
2039 ohci_iso_recv_stop(iso);
2040 return 0;
2041 case RECV_RELEASE:
2042 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2043 return 0;
2044 case RECV_FLUSH:
2045 ohci_iso_recv_task((unsigned long) iso);
2046 return 0;
2047 case RECV_SHUTDOWN:
2048 ohci_iso_recv_shutdown(iso);
2049 return 0;
2050 case RECV_LISTEN_CHANNEL:
2051 ohci_iso_recv_change_channel(iso, arg, 1);
2052 return 0;
2053 case RECV_UNLISTEN_CHANNEL:
2054 ohci_iso_recv_change_channel(iso, arg, 0);
2055 return 0;
2056 case RECV_SET_CHANNEL_MASK:
2057 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2058 return 0;
2059
2060 default:
2061 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2062 cmd);
2063 break;
2064 }
2065 return -EINVAL;
2066}
2067
2068/***************************************
2069 * IEEE-1394 functionality section END *
2070 ***************************************/
2071
2072
2073/********************************************************
2074 * Global stuff (interrupt handler, init/shutdown code) *
2075 ********************************************************/
2076
2077static void dma_trm_reset(struct dma_trm_ctx *d)
2078{
2079 unsigned long flags;
2080 LIST_HEAD(packet_list);
2081 struct ti_ohci *ohci = d->ohci;
2082 struct hpsb_packet *packet, *ptmp;
2083
2084 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2085
2086 /* Lock the context, reset it and release it. Move the packets
2087 * that were pending in the context to packet_list and free
2088 * them after releasing the lock. */
2089
2090 spin_lock_irqsave(&d->lock, flags);
2091
2092 list_splice_init(&d->fifo_list, &packet_list);
2093 list_splice_init(&d->pending_list, &packet_list);
2094
2095 d->branchAddrPtr = NULL;
2096 d->sent_ind = d->prg_ind;
2097 d->free_prgs = d->num_desc;
2098
2099 spin_unlock_irqrestore(&d->lock, flags);
2100
2101 if (list_empty(&packet_list))
2102 return;
2103
2104 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2105
2106 /* Now process subsystem callbacks for the packets from this
2107 * context. */
2108 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2109 list_del_init(&packet->driver_list);
2110 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2111 }
2112}
2113
2114static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2115 quadlet_t rx_event,
2116 quadlet_t tx_event)
2117{
2118 struct ohci1394_iso_tasklet *t;
2119 unsigned long mask;
4a9949d7 2120 unsigned long flags;
1da177e4 2121
4a9949d7 2122 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2123
2124 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2125 mask = 1 << t->context;
2126
2127 if (t->type == OHCI_ISO_TRANSMIT) {
2128 if (tx_event & mask)
2129 tasklet_schedule(&t->tasklet);
2130 } else {
2131 /* OHCI_ISO_RECEIVE or OHCI_ISO_MULTICHANNEL_RECEIVE */
2132 if (rx_event & mask)
2133 tasklet_schedule(&t->tasklet);
2134 }
2135 }
2136
4a9949d7 2137 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2138}
2139
7d12e780 2140static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
2141{
2142 quadlet_t event, node_id;
2143 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2144 struct hpsb_host *host = ohci->host;
2145 int phyid = -1, isroot = 0;
2146 unsigned long flags;
2147
2148 /* Read and clear the interrupt event register. Don't clear
2149 * the busReset event, though. This is done when we get the
2150 * selfIDComplete interrupt. */
2151 spin_lock_irqsave(&ohci->event_lock, flags);
2152 event = reg_read(ohci, OHCI1394_IntEventClear);
2153 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2154 spin_unlock_irqrestore(&ohci->event_lock, flags);
2155
2156 if (!event)
2157 return IRQ_NONE;
2158
 2159 /* If event is ~(u32)0, the CardBus card was ejected. In this
 2160 * case we just return and clean up in the ohci1394_pci_remove()
 2161 * function. */
2162 if (event == ~(u32) 0) {
2163 DBGMSG("Device removed.");
2164 return IRQ_NONE;
2165 }
2166
2167 DBGMSG("IntEvent: %08x", event);
2168
2169 if (event & OHCI1394_unrecoverableError) {
2170 int ctx;
2171 PRINT(KERN_ERR, "Unrecoverable error!");
2172
2173 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2174 PRINT(KERN_ERR, "Async Req Tx Context died: "
2175 "ctrl[%08x] cmdptr[%08x]",
2176 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2177 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2178
2179 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2180 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2181 "ctrl[%08x] cmdptr[%08x]",
2182 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2183 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2184
2185 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2186 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2187 "ctrl[%08x] cmdptr[%08x]",
2188 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2189 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2190
2191 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2192 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2193 "ctrl[%08x] cmdptr[%08x]",
2194 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2195 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2196
2197 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2198 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2199 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2200 "ctrl[%08x] cmdptr[%08x]", ctx,
2201 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2202 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2203 }
2204
2205 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2206 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2207 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2208 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2209 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2210 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2211 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2212 }
2213
2214 event &= ~OHCI1394_unrecoverableError;
2215 }
2216 if (event & OHCI1394_postedWriteErr) {
2217 PRINT(KERN_ERR, "physical posted write error");
 2218 /* no recovery strategy yet; it would have to involve the protocol drivers */
c13596b0 2219 event &= ~OHCI1394_postedWriteErr;
e2f8165d 2220 }
2221 if (event & OHCI1394_cycleTooLong) {
 2222 if (printk_ratelimit())
2223 PRINT(KERN_WARNING, "isochronous cycle too long");
2224 else
2225 DBGMSG("OHCI1394_cycleTooLong");
2226 reg_write(ohci, OHCI1394_LinkControlSet,
2227 OHCI1394_LinkControl_CycleMaster);
2228 event &= ~OHCI1394_cycleTooLong;
2229 }
2230 if (event & OHCI1394_cycleInconsistent) {
2231 /* We subscribe to the cycleInconsistent event only to
2232 * clear the corresponding event bit... otherwise,
2233 * isochronous cycleMatch DMA won't work. */
2234 DBGMSG("OHCI1394_cycleInconsistent");
2235 event &= ~OHCI1394_cycleInconsistent;
2236 }
2237 if (event & OHCI1394_busReset) {
2238 /* The busReset event bit can't be cleared during the
2239 * selfID phase, so we disable busReset interrupts, to
2240 * avoid burying the cpu in interrupt requests. */
2241 spin_lock_irqsave(&ohci->event_lock, flags);
2242 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2243
2244 if (ohci->check_busreset) {
2245 int loop_count = 0;
2246
2247 udelay(10);
2248
2249 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2250 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2251
2252 spin_unlock_irqrestore(&ohci->event_lock, flags);
2253 udelay(10);
2254 spin_lock_irqsave(&ohci->event_lock, flags);
2255
 2256 /* The loop counter check is to prevent the driver
 2257 * from remaining in this state forever. On the
 2258 * initial bus reset, the loop would otherwise run
 2259 * forever and hang the system until some device is
 2260 * manually plugged into or out of a port! The forced
 2261 * reset seems to solve this problem. This mainly affects nForce2. */
2262 if (loop_count > 10000) {
2263 ohci_devctl(host, RESET_BUS, LONG_RESET);
2264 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2265 loop_count = 0;
2266 }
2267
2268 loop_count++;
2269 }
2270 }
2271 spin_unlock_irqrestore(&ohci->event_lock, flags);
2272 if (!host->in_bus_reset) {
2273 DBGMSG("irq_handler: Bus reset requested");
2274
2275 /* Subsystem call */
2276 hpsb_bus_reset(ohci->host);
2277 }
2278 event &= ~OHCI1394_busReset;
2279 }
2280 if (event & OHCI1394_reqTxComplete) {
2281 struct dma_trm_ctx *d = &ohci->at_req_context;
2282 DBGMSG("Got reqTxComplete interrupt "
2283 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2284 if (reg_read(ohci, d->ctrlSet) & 0x800)
2285 ohci1394_stop_context(ohci, d->ctrlClear,
2286 "reqTxComplete");
2287 else
2288 dma_trm_tasklet((unsigned long)d);
2289 //tasklet_schedule(&d->task);
2290 event &= ~OHCI1394_reqTxComplete;
2291 }
2292 if (event & OHCI1394_respTxComplete) {
2293 struct dma_trm_ctx *d = &ohci->at_resp_context;
2294 DBGMSG("Got respTxComplete interrupt "
2295 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2296 if (reg_read(ohci, d->ctrlSet) & 0x800)
2297 ohci1394_stop_context(ohci, d->ctrlClear,
2298 "respTxComplete");
2299 else
2300 tasklet_schedule(&d->task);
2301 event &= ~OHCI1394_respTxComplete;
2302 }
2303 if (event & OHCI1394_RQPkt) {
2304 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2305 DBGMSG("Got RQPkt interrupt status=0x%08X",
2306 reg_read(ohci, d->ctrlSet));
2307 if (reg_read(ohci, d->ctrlSet) & 0x800)
2308 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2309 else
2310 tasklet_schedule(&d->task);
2311 event &= ~OHCI1394_RQPkt;
2312 }
2313 if (event & OHCI1394_RSPkt) {
2314 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2315 DBGMSG("Got RSPkt interrupt status=0x%08X",
2316 reg_read(ohci, d->ctrlSet));
2317 if (reg_read(ohci, d->ctrlSet) & 0x800)
2318 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2319 else
2320 tasklet_schedule(&d->task);
2321 event &= ~OHCI1394_RSPkt;
2322 }
2323 if (event & OHCI1394_isochRx) {
2324 quadlet_t rx_event;
2325
2326 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2327 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2328 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2329 event &= ~OHCI1394_isochRx;
2330 }
2331 if (event & OHCI1394_isochTx) {
2332 quadlet_t tx_event;
2333
2334 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2335 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2336 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2337 event &= ~OHCI1394_isochTx;
2338 }
2339 if (event & OHCI1394_selfIDComplete) {
2340 if (host->in_bus_reset) {
2341 node_id = reg_read(ohci, OHCI1394_NodeID);
2342
2343 if (!(node_id & 0x80000000)) {
2344 PRINT(KERN_ERR,
2345 "SelfID received, but NodeID invalid "
2346 "(probably new bus reset occurred): %08X",
2347 node_id);
2348 goto selfid_not_valid;
2349 }
2350
2351 phyid = node_id & 0x0000003f;
2352 isroot = (node_id & 0x40000000) != 0;
2353
2354 DBGMSG("SelfID interrupt received "
2355 "(phyid %d, %s)", phyid,
2356 (isroot ? "root" : "not root"));
2357
2358 handle_selfid(ohci, host, phyid, isroot);
2359
2360 /* Clear the bus reset event and re-enable the
2361 * busReset interrupt. */
2362 spin_lock_irqsave(&ohci->event_lock, flags);
2363 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2364 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2365 spin_unlock_irqrestore(&ohci->event_lock, flags);
2366
2367 /* Turn on phys dma reception.
2368 *
2369 * TODO: Enable some sort of filtering management.
2370 */
2371 if (phys_dma) {
2372 reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
2373 0xffffffff);
2374 reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
2375 0xffffffff);
2376 }
2377
2378 DBGMSG("PhyReqFilter=%08x%08x",
2379 reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
2380 reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
2381
2382 hpsb_selfid_complete(host, phyid, isroot);
2383 } else
2384 PRINT(KERN_ERR,
2385 "SelfID received outside of bus reset sequence");
2386
2387selfid_not_valid:
2388 event &= ~OHCI1394_selfIDComplete;
2389 }
2390
2391 /* Make sure we handle everything, just in case we accidentally
2392 * enabled an interrupt that we didn't write a handler for. */
2393 if (event)
2394 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2395 event);
2396
2397 return IRQ_HANDLED;
2398}
2399
2400/* Put the buffer back into the dma context */
2401static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2402{
2403 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2404 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2405
2406 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2407 d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
 2408 idx = (idx + d->num_desc - 1) % d->num_desc;
2409 d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
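	/* Re-arm sequence: the reinserted descriptor gets a full residual
	 * count and cleared branch Z bits, making it the new tail, and the
	 * preceding descriptor gets Z=1 so the ring is spliced back together. */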
2410
2411 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2412 * context program descriptors before it sees the wakeup bit set. */
2413 wmb();
2414
2415 /* wake up the dma context if necessary */
2416 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2417 PRINT(KERN_INFO,
2418 "Waking dma ctx=%d ... processing is probably too slow",
2419 d->ctx);
2420 }
2421
2422 /* do this always, to avoid race condition */
2423 reg_write(ohci, d->ctrlSet, 0x1000);
2424}
2425
2426#define cond_le32_to_cpu(data, noswap) \
2427 (noswap ? data : le32_to_cpu(data))
2428
2429static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2430 -1, 0, -1, 0, -1, -1, 16, -1};
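/* A rough key to the table above: each entry is the received size in
 * bytes of a fixed-length packet for that tcode, including the 4-byte
 * status trailer the controller appends; 0 marks block tcodes whose
 * total length has to be read out of the packet itself (packet_length()
 * below adds 20 for the 16-byte header plus trailer); -1 marks tcodes
 * that should never appear in a receive buffer. */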
2431
2432/*
2433 * Determine the length of a packet in the buffer
2434 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2435 */
2436static inline int packet_length(struct dma_rcv_ctx *d, int idx,
2437 quadlet_t *buf_ptr, int offset,
2438 unsigned char tcode, int noswap)
2439{
2440 int length = -1;
2441
2442 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2443 length = TCODE_SIZE[tcode];
2444 if (length == 0) {
2445 if (offset + 12 >= d->buf_size) {
2446 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2447 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2448 } else {
2449 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2450 }
2451 length += 20;
2452 }
2453 } else if (d->type == DMA_CTX_ISO) {
2454 /* Assumption: buffer fill mode with header/trailer */
2455 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2456 }
2457
2458 if (length > 0 && length % 4)
2459 length += 4 - (length % 4);
2460
2461 return length;
2462}
2463
2464/* Tasklet that processes dma receive buffers */
2465static void dma_rcv_tasklet (unsigned long data)
2466{
2467 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2468 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2469 unsigned int split_left, idx, offset, rescount;
2470 unsigned char tcode;
2471 int length, bytes_left, ack;
2472 unsigned long flags;
2473 quadlet_t *buf_ptr;
2474 char *split_ptr;
2475 char msg[256];
2476
2477 spin_lock_irqsave(&d->lock, flags);
2478
2479 idx = d->buf_ind;
2480 offset = d->buf_offset;
2481 buf_ptr = d->buf_cpu[idx] + offset/4;
2482
2483 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2484 bytes_left = d->buf_size - rescount - offset;
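	/* resCount (the low 16 status bits) is how many bytes the controller
	 * has not yet filled into this buffer, so buf_size - resCount is the
	 * fill level and bytes_left is what remains unconsumed past offset. */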
2485
2486 while (bytes_left > 0) {
2487 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2488
2489 /* packet_length() will return < 4 for an error */
2490 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2491
2492 if (length < 4) { /* something is wrong */
2493 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2494 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2495 d->ctx, length);
2496 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2497 spin_unlock_irqrestore(&d->lock, flags);
2498 return;
2499 }
2500
2501 /* The first case is where we have a packet that crosses
2502 * over more than one descriptor. The next case is where
2503 * it's all in the first descriptor. */
2504 if ((offset + length) > d->buf_size) {
2505 DBGMSG("Split packet rcv'd");
2506 if (length > d->split_buf_size) {
2507 ohci1394_stop_context(ohci, d->ctrlClear,
2508 "Split packet size exceeded");
2509 d->buf_ind = idx;
2510 d->buf_offset = offset;
2511 spin_unlock_irqrestore(&d->lock, flags);
2512 return;
2513 }
2514
2515 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2516 == d->buf_size) {
 2517 /* The other part of the packet has not been
 2518 * written yet; this should never happen, but
 2519 * if it does, we'll get it on the next call. */
2520 PRINT(KERN_INFO,
2521 "Got only half a packet!");
2522 d->buf_ind = idx;
2523 d->buf_offset = offset;
2524 spin_unlock_irqrestore(&d->lock, flags);
2525 return;
2526 }
2527
2528 split_left = length;
2529 split_ptr = (char *)d->spb;
2530 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2531 split_left -= d->buf_size-offset;
2532 split_ptr += d->buf_size-offset;
2533 insert_dma_buffer(d, idx);
2534 idx = (idx+1) % d->num_desc;
2535 buf_ptr = d->buf_cpu[idx];
2536 offset=0;
2537
2538 while (split_left >= d->buf_size) {
2539 memcpy(split_ptr,buf_ptr,d->buf_size);
2540 split_ptr += d->buf_size;
2541 split_left -= d->buf_size;
2542 insert_dma_buffer(d, idx);
2543 idx = (idx+1) % d->num_desc;
2544 buf_ptr = d->buf_cpu[idx];
2545 }
2546
2547 if (split_left > 0) {
2548 memcpy(split_ptr, buf_ptr, split_left);
2549 offset = split_left;
2550 buf_ptr += offset/4;
2551 }
2552 } else {
2553 DBGMSG("Single packet rcv'd");
2554 memcpy(d->spb, buf_ptr, length);
2555 offset += length;
2556 buf_ptr += length/4;
2557 if (offset==d->buf_size) {
2558 insert_dma_buffer(d, idx);
2559 idx = (idx+1) % d->num_desc;
2560 buf_ptr = d->buf_cpu[idx];
2561 offset=0;
2562 }
2563 }
2564
 2565 /* We get one PHY packet in the async descriptor for each
 2566 * bus reset. We always ignore it. */
2567 if (tcode != OHCI1394_TCODE_PHY) {
2568 if (!ohci->no_swap_incoming)
9531f13a 2569 header_le32_to_cpu(d->spb, tcode);
2570 DBGMSG("Packet received from node"
2571 " %d ack=0x%02X spd=%d tcode=0x%X"
2572 " length=%d ctx=%d tlabel=%d",
2573 (d->spb[1]>>16)&0x3f,
2574 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2575 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2576 tcode, length, d->ctx,
cf82703d 2577 (d->spb[0]>>10)&0x3f);
2578
2579 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2580 == 0x11) ? 1 : 0;
2581
2582 hpsb_packet_received(ohci->host, d->spb,
2583 length-4, ack);
2584 }
2585#ifdef OHCI1394_DEBUG
2586 else
2587 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2588 d->ctx);
2589#endif
2590
2591 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2592
2593 bytes_left = d->buf_size - rescount - offset;
2594
2595 }
2596
2597 d->buf_ind = idx;
2598 d->buf_offset = offset;
2599
2600 spin_unlock_irqrestore(&d->lock, flags);
2601}
2602
2603/* Bottom half that processes sent packets */
2604static void dma_trm_tasklet (unsigned long data)
2605{
2606 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2607 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2608 struct hpsb_packet *packet, *ptmp;
2609 unsigned long flags;
2610 u32 status, ack;
2611 size_t datasize;
2612
2613 spin_lock_irqsave(&d->lock, flags);
2614
2615 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2616 datasize = packet->data_size;
2617 if (datasize && packet->type != hpsb_raw)
2618 status = le32_to_cpu(
2619 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2620 else
2621 status = le32_to_cpu(
2622 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2623
2624 if (status == 0)
 2625 /* this packet hasn't been sent yet */
2626 break;
2627
2628#ifdef OHCI1394_DEBUG
2629 if (datasize)
2630 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2631 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2632 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2633 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2634 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2635 status&0x1f, (status>>5)&0x3,
2636 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2637 d->ctx);
2638 else
2639 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
dfe547ab 2640 "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2641 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2642 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2643 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2644 status&0x1f, (status>>5)&0x3,
2645 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2646 d->ctx);
2647 else
2648 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
dfe547ab 2649 "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2650 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2651 >>16)&0x3f,
2652 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2653 >>4)&0xf,
2654 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2655 >>10)&0x3f,
2656 status&0x1f, (status>>5)&0x3,
2657 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2658 d->ctx);
2659#endif
2660
2661 if (status & 0x10) {
2662 ack = status & 0xf;
2663 } else {
2664 switch (status & 0x1f) {
2665 case EVT_NO_STATUS: /* that should never happen */
2666 case EVT_RESERVED_A: /* that should never happen */
2667 case EVT_LONG_PACKET: /* that should never happen */
2668 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2669 ack = ACKX_SEND_ERROR;
2670 break;
2671 case EVT_MISSING_ACK:
2672 ack = ACKX_TIMEOUT;
2673 break;
2674 case EVT_UNDERRUN:
2675 ack = ACKX_SEND_ERROR;
2676 break;
2677 case EVT_OVERRUN: /* that should never happen */
2678 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2679 ack = ACKX_SEND_ERROR;
2680 break;
2681 case EVT_DESCRIPTOR_READ:
2682 case EVT_DATA_READ:
2683 case EVT_DATA_WRITE:
2684 ack = ACKX_SEND_ERROR;
2685 break;
2686 case EVT_BUS_RESET: /* that should never happen */
2687 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2688 ack = ACKX_SEND_ERROR;
2689 break;
2690 case EVT_TIMEOUT:
2691 ack = ACKX_TIMEOUT;
2692 break;
2693 case EVT_TCODE_ERR:
2694 ack = ACKX_SEND_ERROR;
2695 break;
2696 case EVT_RESERVED_B: /* that should never happen */
2697 case EVT_RESERVED_C: /* that should never happen */
2698 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2699 ack = ACKX_SEND_ERROR;
2700 break;
2701 case EVT_UNKNOWN:
2702 case EVT_FLUSHED:
2703 ack = ACKX_SEND_ERROR;
2704 break;
2705 default:
2706 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2707 ack = ACKX_SEND_ERROR;
2708 BUG();
2709 }
2710 }
2711
2712 list_del_init(&packet->driver_list);
2713 hpsb_packet_sent(ohci->host, packet, ack);
2714
17a62486 2715 if (datasize)
2716 pci_unmap_single(ohci->dev,
2717 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2718 datasize, PCI_DMA_TODEVICE);
2719
2720 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2721 d->free_prgs++;
2722 }
2723
2724 dma_trm_flush(ohci, d);
2725
2726 spin_unlock_irqrestore(&d->lock, flags);
2727}
2728
2729static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2730{
2731 int i;
2732 struct ti_ohci *ohci = d->ohci;
2733
2734 if (ohci == NULL)
2735 return;
2736
2737 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2738
2739 if (d->buf_cpu) {
2740 for (i=0; i<d->num_desc; i++)
17a62486 2741 if (d->buf_cpu[i] && d->buf_bus[i])
2742 pci_free_consistent(
2743 ohci->dev, d->buf_size,
2744 d->buf_cpu[i], d->buf_bus[i]);
2745 kfree(d->buf_cpu);
2746 kfree(d->buf_bus);
2747 }
2748 if (d->prg_cpu) {
2749 for (i=0; i<d->num_desc; i++)
2750 if (d->prg_cpu[i] && d->prg_bus[i])
2751 pci_pool_free(d->prg_pool, d->prg_cpu[i],
2752 d->prg_bus[i]);
1da177e4 2753 pci_pool_destroy(d->prg_pool);
2754 kfree(d->prg_cpu);
2755 kfree(d->prg_bus);
2756 }
616b859f 2757 kfree(d->spb);
2758
2759 /* Mark this context as freed. */
2760 d->ohci = NULL;
2761}
2762
2763static int
2764alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2765 enum context_type type, int ctx, int num_desc,
2766 int buf_size, int split_buf_size, int context_base)
2767{
2768 int i, len;
2769 static int num_allocs;
2770 static char pool_name[20];
2771
2772 d->ohci = ohci;
2773 d->type = type;
2774 d->ctx = ctx;
2775
2776 d->num_desc = num_desc;
2777 d->buf_size = buf_size;
2778 d->split_buf_size = split_buf_size;
2779
2780 d->ctrlSet = 0;
2781 d->ctrlClear = 0;
2782 d->cmdPtr = 0;
2783
2784 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2785 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2786
2787 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2788 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2789 free_dma_rcv_ctx(d);
2790 return -ENOMEM;
2791 }
1da177e4 2792
2793 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2794 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2795
2796 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2797 PRINT(KERN_ERR, "Failed to allocate dma prg");
2798 free_dma_rcv_ctx(d);
2799 return -ENOMEM;
2800 }
2801
2802 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2803
2804 if (d->spb == NULL) {
2805 PRINT(KERN_ERR, "Failed to allocate split buffer");
2806 free_dma_rcv_ctx(d);
2807 return -ENOMEM;
2808 }
2809
2810 len = sprintf(pool_name, "ohci1394_rcv_prg");
2811 sprintf(pool_name+len, "%d", num_allocs);
2812 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2813 sizeof(struct dma_cmd), 4, 0);
 2814 if (d->prg_pool == NULL)
2815 {
2816 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2817 free_dma_rcv_ctx(d);
2818 return -ENOMEM;
2819 }
2820 num_allocs++;
2821
2822 for (i=0; i<d->num_desc; i++) {
2823 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2824 d->buf_size,
2825 d->buf_bus+i);
2826
2827 if (d->buf_cpu[i] != NULL) {
2828 memset(d->buf_cpu[i], 0, d->buf_size);
2829 } else {
2830 PRINT(KERN_ERR,
2831 "Failed to allocate dma buffer");
2832 free_dma_rcv_ctx(d);
2833 return -ENOMEM;
2834 }
2835
e94b1766 2836 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2837
2838 if (d->prg_cpu[i] != NULL) {
2839 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2840 } else {
2841 PRINT(KERN_ERR,
2842 "Failed to allocate dma prg");
2843 free_dma_rcv_ctx(d);
2844 return -ENOMEM;
2845 }
2846 }
2847
2848 spin_lock_init(&d->lock);
2849
2850 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2851 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2852 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
1da177e4 2853
53c96b41 2854 tasklet_init(&d->task, dma_rcv_tasklet, (unsigned long) d);
2855 return 0;
2856}
2857
2858static void free_dma_trm_ctx(struct dma_trm_ctx *d)
2859{
2860 int i;
2861 struct ti_ohci *ohci = d->ohci;
2862
2863 if (ohci == NULL)
2864 return;
2865
2866 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
2867
2868 if (d->prg_cpu) {
2869 for (i=0; i<d->num_desc; i++)
2870 if (d->prg_cpu[i] && d->prg_bus[i])
2871 pci_pool_free(d->prg_pool, d->prg_cpu[i],
2872 d->prg_bus[i]);
1da177e4 2873 pci_pool_destroy(d->prg_pool);
2874 kfree(d->prg_cpu);
2875 kfree(d->prg_bus);
2876 }
2877
2878 /* Mark this context as freed. */
2879 d->ohci = NULL;
2880}
2881
2882static int
2883alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
2884 enum context_type type, int ctx, int num_desc,
2885 int context_base)
2886{
2887 int i, len;
2888 static char pool_name[20];
2889 static int num_allocs=0;
2890
2891 d->ohci = ohci;
2892 d->type = type;
2893 d->ctx = ctx;
2894 d->num_desc = num_desc;
2895 d->ctrlSet = 0;
2896 d->ctrlClear = 0;
2897 d->cmdPtr = 0;
2898
2899 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
2900 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
2901
2902 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2903 PRINT(KERN_ERR, "Failed to allocate at dma prg");
2904 free_dma_trm_ctx(d);
2905 return -ENOMEM;
2906 }
2907
2908 len = sprintf(pool_name, "ohci1394_trm_prg");
2909 sprintf(pool_name+len, "%d", num_allocs);
2910 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2911 sizeof(struct at_dma_prg), 4, 0);
2912 if (d->prg_pool == NULL) {
2913 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2914 free_dma_trm_ctx(d);
2915 return -ENOMEM;
2916 }
2917 num_allocs++;
2918
1da177e4 2919 for (i = 0; i < d->num_desc; i++) {
e94b1766 2920 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2921
2922 if (d->prg_cpu[i] != NULL) {
2923 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
2924 } else {
2925 PRINT(KERN_ERR,
2926 "Failed to allocate at dma prg");
2927 free_dma_trm_ctx(d);
2928 return -ENOMEM;
2929 }
2930 }
2931
2932 spin_lock_init(&d->lock);
2933
2934 /* initialize tasklet */
2935 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2936 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2937 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
2938 tasklet_init(&d->task, dma_trm_tasklet, (unsigned long)d);
2939 return 0;
2940}
2941
2942static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
2943{
2944 struct ti_ohci *ohci = host->hostdata;
2945
2946 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
2947 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
2948
2949 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
2950}
2951
2952
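/* Kicks off one of the controller's compare-and-swap CSR operations
 * (the low two bits of 'reg' select which CSR) and polls the csrControl
 * done bit for up to OHCI_LOOP_COUNT ms before reading back the result. */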
2953static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
2954 quadlet_t data, quadlet_t compare)
2955{
2956 struct ti_ohci *ohci = host->hostdata;
2957 int i;
2958
2959 reg_write(ohci, OHCI1394_CSRData, data);
2960 reg_write(ohci, OHCI1394_CSRCompareData, compare);
2961 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
2962
2963 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
2964 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
2965 break;
2966
2967 mdelay(1);
2968 }
2969
2970 return reg_read(ohci, OHCI1394_CSRData);
2971}
2972
2973static struct hpsb_host_driver ohci1394_driver = {
2974 .owner = THIS_MODULE,
2975 .name = OHCI1394_DRIVER_NAME,
2976 .set_hw_config_rom = ohci_set_hw_config_rom,
2977 .transmit_packet = ohci_transmit,
2978 .devctl = ohci_devctl,
2979 .isoctl = ohci_isoctl,
2980 .hw_csr_reg = ohci_hw_csr_reg,
2981};
2982
2983/***********************************
2984 * PCI Driver Interface functions *
2985 ***********************************/
2986
48cfae44 2987#ifdef CONFIG_PPC_PMAC
2988static void ohci1394_pmac_on(struct pci_dev *dev)
2989{
48cfae44 2990 if (machine_is(powermac)) {
5370f1f5 2991 struct device_node *ofn = pci_device_to_OF_node(dev);
48cfae44 2992
2993 if (ofn) {
2994 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
2995 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
2996 }
2997 }
2998}
2999
3000static void ohci1394_pmac_off(struct pci_dev *dev)
3001{
3002 if (machine_is(powermac)) {
3003 struct device_node *ofn = pci_device_to_OF_node(dev);
3004
3005 if (ofn) {
3006 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3007 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3008 }
3009 }
3010}
3011#else
3012#define ohci1394_pmac_on(dev)
3013#define ohci1394_pmac_off(dev)
3014#endif /* CONFIG_PPC_PMAC */
3015
3016static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3017 const struct pci_device_id *ent)
3018{
3019 struct hpsb_host *host;
3020 struct ti_ohci *ohci; /* shortcut to currently handled device */
3021 resource_size_t ohci_base;
d09c6803 3022 int err = -ENOMEM;
3023
3024 ohci1394_pmac_on(dev);
3025 if (pci_enable_device(dev)) {
3026 PRINT_G(KERN_ERR, "Failed to enable OHCI hardware");
3027 err = -ENXIO;
3028 goto err;
3029 }
3030 pci_set_master(dev);
3031
3032 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3033 if (!host) {
3034 PRINT_G(KERN_ERR, "Failed to allocate host structure");
3035 goto err;
3036 }
3037 ohci = host->hostdata;
3038 ohci->dev = dev;
3039 ohci->host = host;
3040 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3041 host->pdev = dev;
3042 pci_set_drvdata(dev, ohci);
3043
3044 /* We don't want hardware swapping */
3045 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3046
3047 /* Some oddball Apple controllers do not order the selfid
3048 * properly, so we make up for it here. */
3049#ifndef __LITTLE_ENDIAN
3050 /* XXX: Need a better way to check this. I'm wondering if we can
3051 * read the values of the OHCI1394_PCI_HCI_Control and the
3052 * noByteSwapData registers to see if they were not cleared to
3053 * zero. Should this work? Obviously it's not defined what these
3054 * registers will read when they aren't supported. Bleh! */
3055 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3056 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3057 ohci->no_swap_incoming = 1;
3058 ohci->selfid_swap = 0;
3059 } else
3060 ohci->selfid_swap = 1;
3061#endif
3062
3063
3064#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3065#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3066#endif
3067
3068 /* These chipsets require a bit of extra care when checking after
3069 * a busreset. */
3070 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3071 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3072 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3073 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3074 ohci->check_busreset = 1;
3075
3076 /* We hardwire the MMIO length, since some CardBus adaptors
3077 * fail to report the right length. Anyway, the ohci spec
3078 * clearly says it's 2kb, so this shouldn't be a problem. */
3079 ohci_base = pci_resource_start(dev, 0);
94c2d01a 3080 if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
3081 PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
3082 (unsigned long long)pci_resource_len(dev, 0));
1da177e4 3083
083922fe 3084 if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
3085 OHCI1394_DRIVER_NAME)) {
3086 PRINT_G(KERN_ERR, "MMIO resource (0x%llx - 0x%llx) unavailable",
3087 (unsigned long long)ohci_base,
3088 (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
3089 goto err;
3090 }
3091 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3092
3093 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3094 if (ohci->registers == NULL) {
3095 PRINT_G(KERN_ERR, "Failed to remap registers");
3096 err = -ENXIO;
3097 goto err;
3098 }
3099 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3100 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3101
3102 /* csr_config rom allocation */
3103 ohci->csr_config_rom_cpu =
3104 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3105 &ohci->csr_config_rom_bus);
3106 if (ohci->csr_config_rom_cpu == NULL) {
3107 PRINT_G(KERN_ERR, "Failed to allocate buffer config rom");
3108 goto err;
3109 }
3110 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3111
3112 /* self-id dma buffer allocation */
3113 ohci->selfid_buf_cpu =
3114 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3115 &ohci->selfid_buf_bus);
3116 if (ohci->selfid_buf_cpu == NULL) {
3117 PRINT_G(KERN_ERR, "Failed to allocate self-ID buffer");
3118 goto err;
3119 }
3120 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3121
3122 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3123 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3124 "8Kb boundary... may cause problems on some CXD3222 chip",
3125 ohci->selfid_buf_cpu);
3126
3127 /* No self-id errors at startup */
3128 ohci->self_id_errors = 0;
3129
3130 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3131 /* AR DMA request context allocation */
3132 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3133 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3134 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3135 OHCI1394_AsReqRcvContextBase) < 0) {
3136 PRINT_G(KERN_ERR, "Failed to allocate AR Req context");
3137 goto err;
3138 }
3139 /* AR DMA response context allocation */
3140 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3141 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3142 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3143 OHCI1394_AsRspRcvContextBase) < 0) {
3144 PRINT_G(KERN_ERR, "Failed to allocate AR Resp context");
3145 goto err;
3146 }
3147 /* AT DMA request context */
3148 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3149 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3150 OHCI1394_AsReqTrContextBase) < 0) {
3151 PRINT_G(KERN_ERR, "Failed to allocate AT Req context");
3152 goto err;
3153 }
3154 /* AT DMA response context */
3155 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3156 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3157 OHCI1394_AsRspTrContextBase) < 0) {
3158 PRINT_G(KERN_ERR, "Failed to allocate AT Resp context");
3159 goto err;
3160 }
3161 /* Start off with a soft reset, to clear everything to a sane
3162 * state. */
3163 ohci_soft_reset(ohci);
3164
3165 /* Now enable LPS, which we need in order to start accessing
3166 * most of the registers. In fact, on some cards (ALI M5251),
3167 * accessing registers in the SClk domain without LPS enabled
3168 * will lock up the machine. Wait 50msec to make sure we have
3169 * full link enabled. */
3170 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3171
3172 /* Disable and clear interrupts */
3173 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3174 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3175
3176 mdelay(50);
3177
3178 /* Determine the number of available IR and IT contexts. */
3179 ohci->nb_iso_rcv_ctx =
3180 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3181 ohci->nb_iso_xmit_ctx =
3182 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3183
3184 /* Set the usage bits for non-existent contexts so they can't
3185 * be allocated */
3186 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3187 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
3188
3189 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3190 spin_lock_init(&ohci->iso_tasklet_list_lock);
3191 ohci->ISO_channel_usage = 0;
3192 spin_lock_init(&ohci->IR_channel_lock);
3193
3515d016 3194 spin_lock_init(&ohci->event_lock);
1da177e4 3195
3515d016 3196 /*
272ee69c 3197 * interrupts are disabled, all right, but... due to IRQF_SHARED we
3198 * might get called anyway. We'll see no event, of course, but
3199 * we need to get to that "no event", so enough should be initialized
3200 * by that point.
3201 */
272ee69c 3202 if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3203 OHCI1394_DRIVER_NAME, ohci)) {
3204 PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
3205 goto err;
3206 }
3207 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3208 ohci_initialize(ohci);
3209
3210 /* Set certain csr values */
3211 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3212 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3213 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3214 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3215 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3216
3217 if (phys_dma) {
3218 host->low_addr_space =
3219 (u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
3220 if (!host->low_addr_space)
3221 host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
3222 }
3223 host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
3224
1da177e4 3225 /* Tell the highlevel this host is ready */
3226 if (hpsb_add_host(host)) {
3227 PRINT_G(KERN_ERR, "Failed to register host with highlevel");
3228 goto err;
3229 }
3230 ohci->init_state = OHCI_INIT_DONE;
3231
3232 return 0;
3233err:
3234 ohci1394_pci_remove(dev);
3235 return err;
3236}
3237
b25d1666 3238static void ohci1394_pci_remove(struct pci_dev *dev)
3239{
3240 struct ti_ohci *ohci;
b25d1666 3241 struct device *device;
1da177e4 3242
b25d1666 3243 ohci = pci_get_drvdata(dev);
3244 if (!ohci)
3245 return;
3246
b25d1666 3247 device = get_device(&ohci->host->device);
3248
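	/* The cases below intentionally fall through: teardown starts at the
	 * most advanced init state reached and walks back through every
	 * earlier allocation. */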
3249 switch (ohci->init_state) {
3250 case OHCI_INIT_DONE:
3251 hpsb_remove_host(ohci->host);
3252
3253 /* Clear out BUS Options */
3254 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3255 reg_write(ohci, OHCI1394_BusOptions,
3256 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3257 0x00ff0000);
3258 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3259
3260 case OHCI_INIT_HAVE_IRQ:
3261 /* Clear interrupt registers */
3262 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3263 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3264 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3265 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3266 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3267 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3268
3269 /* Disable IRM Contender */
3270 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3271
3272 /* Clear link control register */
3273 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3274
3275 /* Let all other nodes know to ignore us */
3276 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3277
3278 /* Soft reset before we start - this disables
3279 * interrupts and clears linkEnable and LPS. */
3280 ohci_soft_reset(ohci);
b25d1666 3281 free_irq(dev->irq, ohci);
3282
3283 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3284 /* The ohci_soft_reset() stops all DMA contexts, so we
 3285 * don't need to do this. */
3286 free_dma_rcv_ctx(&ohci->ar_req_context);
3287 free_dma_rcv_ctx(&ohci->ar_resp_context);
3288 free_dma_trm_ctx(&ohci->at_req_context);
3289 free_dma_trm_ctx(&ohci->at_resp_context);
1da177e4 3290
1da177e4 3291 case OHCI_INIT_HAVE_SELFID_BUFFER:
b25d1666 3292 pci_free_consistent(dev, OHCI1394_SI_DMA_BUF_SIZE,
3293 ohci->selfid_buf_cpu,
3294 ohci->selfid_buf_bus);
3295
3296 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
b25d1666 3297 pci_free_consistent(dev, OHCI_CONFIG_ROM_LEN,
3298 ohci->csr_config_rom_cpu,
3299 ohci->csr_config_rom_bus);
3300
3301 case OHCI_INIT_HAVE_IOMAPPING:
3302 iounmap(ohci->registers);
3303
3304 case OHCI_INIT_HAVE_MEM_REGION:
b25d1666 3305 release_mem_region(pci_resource_start(dev, 0),
1da177e4 3306 OHCI1394_REGISTER_SIZE);
1da177e4 3307
b25d1666 3308 ohci1394_pmac_off(dev);
3309
3310 case OHCI_INIT_ALLOC_HOST:
b25d1666 3311 pci_set_drvdata(dev, NULL);
3312 }
3313
3314 if (device)
3315 put_device(device);
3316}
3317
2a874182 3318#ifdef CONFIG_PM
b25d1666 3319static int ohci1394_pci_suspend(struct pci_dev *dev, pm_message_t state)
1da177e4 3320{
f011bf08 3321 int err;
b25d1666 3322 struct ti_ohci *ohci = pci_get_drvdata(dev);
f011bf08 3323
3324 if (!ohci) {
3325 printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
3326 OHCI1394_DRIVER_NAME);
da256c55 3327 return -ENXIO;
3328 }
3329 DBGMSG("suspend called");
f011bf08 3330
da256c55 3331 /* Clear the async DMA contexts and stop using the controller */
3332 hpsb_bus_reset(ohci->host);
3333
da256c55 3334 /* See ohci1394_pci_remove() for comments on this sequence */
3335 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3336 reg_write(ohci, OHCI1394_BusOptions,
3337 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3338 0x00ff0000);
3339 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3340 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3341 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3342 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3343 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3344 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
f011bf08 3345 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
f011bf08 3346 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
f011bf08 3347 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
f011bf08 3348 ohci_soft_reset(ohci);
346f5c7e 3349
b25d1666 3350 err = pci_save_state(dev);
2a3f8ad3 3351 if (err) {
5f8d17f6 3352 PRINT(KERN_ERR, "pci_save_state failed with %d", err);
da256c55 3353 return err;
2a3f8ad3 3354 }
b25d1666 3355 err = pci_set_power_state(dev, pci_choose_state(dev, state));
ea6104c2 3356 if (err)
5f8d17f6 3357 DBGMSG("pci_set_power_state failed with %d", err);
b25d1666 3358 ohci1394_pmac_off(dev);
3359
3360 return 0;
3361}
3362
b25d1666 3363static int ohci1394_pci_resume(struct pci_dev *dev)
3364{
3365 int err;
b25d1666 3366 struct ti_ohci *ohci = pci_get_drvdata(dev);
da256c55 3367
3368 if (!ohci) {
3369 printk(KERN_ERR "%s: tried to resume nonexisting host\n",
3370 OHCI1394_DRIVER_NAME);
da256c55 3371 return -ENXIO;
3372 }
3373 DBGMSG("resume called");
da256c55 3374
3375 ohci1394_pmac_on(dev);
3376 pci_set_power_state(dev, PCI_D0);
3377 pci_restore_state(dev);
3378 err = pci_enable_device(dev);
3379 if (err) {
3380 PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
da256c55 3381 return err;
5f8d17f6 3382 }
3383
3384 /* See ohci1394_pci_probe() for comments on this sequence */
3385 ohci_soft_reset(ohci);
3386 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3387 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3388 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3389 mdelay(50);
3390 ohci_initialize(ohci);
3391
3360177c 3392 hpsb_resume_host(ohci->host);
da256c55 3393 return 0;
1da177e4 3394}
f0645e77 3395#endif /* CONFIG_PM */
1da177e4 3396
3397static struct pci_device_id ohci1394_pci_tbl[] = {
3398 {