[SCSI] aic79xx: Remove busyq
[deliverable/linux.git] drivers/scsi/aic7xxx/aic79xx_osm.h
1 /*
2 * Adaptec AIC79xx device driver for Linux.
3 *
4 * Copyright (c) 2000-2001 Adaptec Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.h#137 $
40 *
41 */
42 #ifndef _AIC79XX_LINUX_H_
43 #define _AIC79XX_LINUX_H_
44
45 #include <linux/types.h>
46 #include <linux/blkdev.h>
47 #include <linux/delay.h>
48 #include <linux/ioport.h>
49 #include <linux/pci.h>
50 #include <linux/smp_lock.h>
51 #include <linux/version.h>
52 #include <linux/module.h>
53 #include <asm/byteorder.h>
54 #include <asm/io.h>
55
56 #include <linux/interrupt.h> /* For tasklet support. */
57 #include <linux/config.h>
58 #include <linux/slab.h>
59
60 /* Core SCSI definitions */
61 #define AIC_LIB_PREFIX ahd
62 #include "scsi.h"
63 #include <scsi/scsi_host.h>
64
65 /* Name space conflict with BSD queue macros */
66 #ifdef LIST_HEAD
67 #undef LIST_HEAD
68 #endif
69
70 #include "cam.h"
71 #include "queue.h"
72 #include "scsi_message.h"
73 #include "scsi_iu.h"
74 #include "aiclib.h"
75
76 /*********************************** Debugging ********************************/
77 #ifdef CONFIG_AIC79XX_DEBUG_ENABLE
78 #ifdef CONFIG_AIC79XX_DEBUG_MASK
79 #define AHD_DEBUG 1
80 #define AHD_DEBUG_OPTS CONFIG_AIC79XX_DEBUG_MASK
81 #else
82 /*
83 * Compile in debugging code, but do not enable any printfs.
84 */
85 #define AHD_DEBUG 1
86 #define AHD_DEBUG_OPTS 0
87 #endif
88 /* No debugging code. */
89 #endif
90
91 /********************************** Misc Macros *******************************/
92 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
93 #define powerof2(x) ((((x)-1)&(x))==0)
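/*
 * Editor's note (illustrative, not part of the original header) of what
 * the two helpers above compute:
 *	roundup(13, 8)	== 16	- 13 rounded up to the next multiple of 8
 *	powerof2(64)	!= 0	- 64 is a power of two
 *	powerof2(24)	== 0	- 24 is not
 */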
94
95 /************************* Forward Declarations *******************************/
96 struct ahd_softc;
97 typedef struct pci_dev *ahd_dev_softc_t;
98 typedef Scsi_Cmnd *ahd_io_ctx_t;
99
100 /******************************* Byte Order ***********************************/
101 #define ahd_htobe16(x) cpu_to_be16(x)
102 #define ahd_htobe32(x) cpu_to_be32(x)
103 #define ahd_htobe64(x) cpu_to_be64(x)
104 #define ahd_htole16(x) cpu_to_le16(x)
105 #define ahd_htole32(x) cpu_to_le32(x)
106 #define ahd_htole64(x) cpu_to_le64(x)
107
108 #define ahd_be16toh(x) be16_to_cpu(x)
109 #define ahd_be32toh(x) be32_to_cpu(x)
110 #define ahd_be64toh(x) be64_to_cpu(x)
111 #define ahd_le16toh(x) le16_to_cpu(x)
112 #define ahd_le32toh(x) le32_to_cpu(x)
113 #define ahd_le64toh(x) le64_to_cpu(x)
114
115 /************************* Configuration Data *********************************/
116 extern uint32_t aic79xx_allow_memio;
117 extern int aic79xx_detect_complete;
118 extern Scsi_Host_Template aic79xx_driver_template;
119
120 /***************************** Bus Space/DMA **********************************/
121
122 typedef uint32_t bus_size_t;
123
124 typedef enum {
125 BUS_SPACE_MEMIO,
126 BUS_SPACE_PIO
127 } bus_space_tag_t;
128
129 typedef union {
130 u_long ioport;
131 volatile uint8_t __iomem *maddr;
132 } bus_space_handle_t;
133
134 typedef struct bus_dma_segment
135 {
136 dma_addr_t ds_addr;
137 bus_size_t ds_len;
138 } bus_dma_segment_t;
139
140 struct ahd_linux_dma_tag
141 {
142 bus_size_t alignment;
143 bus_size_t boundary;
144 bus_size_t maxsize;
145 };
146 typedef struct ahd_linux_dma_tag* bus_dma_tag_t;
147
148 struct ahd_linux_dmamap
149 {
150 dma_addr_t bus_addr;
151 };
152 typedef struct ahd_linux_dmamap* bus_dmamap_t;
153
154 typedef int bus_dma_filter_t(void*, dma_addr_t);
155 typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
156
157 #define BUS_DMA_WAITOK 0x0
158 #define BUS_DMA_NOWAIT 0x1
159 #define BUS_DMA_ALLOCNOW 0x2
160 #define BUS_DMA_LOAD_SEGS 0x4 /*
161 * Argument is an S/G list not
162 * a single buffer.
163 */
164
165 #define BUS_SPACE_MAXADDR 0xFFFFFFFF
166 #define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF
167 #define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF
168
169 int ahd_dma_tag_create(struct ahd_softc *, bus_dma_tag_t /*parent*/,
170 bus_size_t /*alignment*/, bus_size_t /*boundary*/,
171 dma_addr_t /*lowaddr*/, dma_addr_t /*highaddr*/,
172 bus_dma_filter_t*/*filter*/, void */*filterarg*/,
173 bus_size_t /*maxsize*/, int /*nsegments*/,
174 bus_size_t /*maxsegsz*/, int /*flags*/,
175 bus_dma_tag_t */*dma_tagp*/);
176
177 void ahd_dma_tag_destroy(struct ahd_softc *, bus_dma_tag_t /*tag*/);
178
179 int ahd_dmamem_alloc(struct ahd_softc *, bus_dma_tag_t /*dmat*/,
180 void** /*vaddr*/, int /*flags*/,
181 bus_dmamap_t* /*mapp*/);
182
183 void ahd_dmamem_free(struct ahd_softc *, bus_dma_tag_t /*dmat*/,
184 void* /*vaddr*/, bus_dmamap_t /*map*/);
185
186 void ahd_dmamap_destroy(struct ahd_softc *, bus_dma_tag_t /*tag*/,
187 bus_dmamap_t /*map*/);
188
189 int ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t /*dmat*/,
190 bus_dmamap_t /*map*/, void * /*buf*/,
191 bus_size_t /*buflen*/, bus_dmamap_callback_t *,
192 void */*callback_arg*/, int /*flags*/);
193
194 int ahd_dmamap_unload(struct ahd_softc *, bus_dma_tag_t, bus_dmamap_t);
195
196 /*
197 * Operations performed by ahd_dmamap_sync().
198 */
199 #define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */
200 #define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */
201 #define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */
202 #define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */
203
204 /*
205 * XXX
206 * ahd_dmamap_sync is only used on buffers allocated with
207 * the pci_alloc_consistent() API. Although I'm not sure how
208 * this works on architectures with a write buffer, Linux does
209 * not have an API to sync "coherent" memory. Perhaps we need
210 * to do an mb()?
211 */
212 #define ahd_dmamap_sync(ahd, dma_tag, dmamap, offset, len, op)
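
/*
 * Editor's note: the block below is an illustrative sketch (not part of
 * the original file) of how the DMA wrappers declared above fit together.
 * The 'parent_tag' argument, the callback, and the example sizes are
 * hypothetical.
 */
#if 0
static void
example_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	/* Record the bus address chosen for this single-segment mapping. */
	*(dma_addr_t *)arg = segs[0].ds_addr;
}

static int
example_alloc_dma_buffer(struct ahd_softc *ahd, bus_dma_tag_t parent_tag)
{
	bus_dma_tag_t	 dmat;
	bus_dmamap_t	 map;
	void		*vaddr;
	dma_addr_t	 busaddr;

	/* Describe the buffer: 4KB, byte aligned, below 4GB, one segment. */
	if (ahd_dma_tag_create(ahd, parent_tag, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/4096, /*nsegments*/1,
			       /*maxsegsz*/4096, /*flags*/0, &dmat) != 0)
		return (ENOMEM);

	/* Allocate coherent memory and load it to learn its bus address. */
	if (ahd_dmamem_alloc(ahd, dmat, &vaddr, BUS_DMA_NOWAIT, &map) != 0) {
		ahd_dma_tag_destroy(ahd, dmat);
		return (ENOMEM);
	}
	ahd_dmamap_load(ahd, dmat, map, vaddr, 4096,
			example_dmamap_cb, &busaddr, /*flags*/0);
	/* busaddr now holds the DMA address backing vaddr. */
	return (0);
}
#endif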
213
214 /************************* Timer Data Structures ******************************/
215 typedef struct timer_list ahd_timer_t;
216
217 /********************************** Includes **********************************/
218 #ifdef CONFIG_AIC79XX_REG_PRETTY_PRINT
219 #define AIC_DEBUG_REGISTERS 1
220 #else
221 #define AIC_DEBUG_REGISTERS 0
222 #endif
223 #include "aic79xx.h"
224
225 /***************************** Timer Facilities *******************************/
226 #define ahd_timer_init init_timer
227 #define ahd_timer_stop del_timer_sync
228 typedef void ahd_linux_callback_t (u_long);
229 static __inline void ahd_timer_reset(ahd_timer_t *timer, u_int usec,
230 ahd_callback_t *func, void *arg);
231 static __inline void ahd_scb_timer_reset(struct scb *scb, u_int usec);
232
233 static __inline void
234 ahd_timer_reset(ahd_timer_t *timer, u_int usec, ahd_callback_t *func, void *arg)
235 {
236 struct ahd_softc *ahd;
237
238 ahd = (struct ahd_softc *)arg;
239 del_timer(timer);
240 timer->data = (u_long)arg;
241 timer->expires = jiffies + (usec * HZ)/1000000;
242 timer->function = (ahd_linux_callback_t*)func;
243 add_timer(timer);
244 }
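
/*
 * Editor's note: illustrative use of ahd_timer_reset() (the callback name
 * below is hypothetical, not taken from this driver):
 *
 *	ahd_timer_reset(&ahd->platform_data->completeq_timer, 10000,
 *			my_completeq_callback, ahd);
 *
 * re-arms the timer to fire roughly 10ms from now, passing 'ahd' back to
 * the callback through timer->data.
 */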
245
246 static __inline void
247 ahd_scb_timer_reset(struct scb *scb, u_int usec)
248 {
249 mod_timer(&scb->io_ctx->eh_timeout, jiffies + (usec * HZ)/1000000);
250 }
251
252 /***************************** SMP support ************************************/
253 #include <linux/spinlock.h>
254
255 #define AHD_SCSI_HAS_HOST_LOCK 1
256
257 #define AIC79XX_DRIVER_VERSION "1.3.11"
258
259 /**************************** Front End Queues ********************************/
260 /*
261 * Data structure used to cast the Linux struct scsi_cmnd to something
262 * that allows us to use the queue macros. The linux structure has
263 * plenty of space to hold the links fields as required by the queue
264 * macros, but the queue macors require them to have the correct type.
265 */
266 struct ahd_cmd_internal {
267 /* Area owned by the Linux scsi layer. */
268 uint8_t private[offsetof(struct scsi_cmnd, SCp.Status)];
269 union {
270 STAILQ_ENTRY(ahd_cmd) ste;
271 LIST_ENTRY(ahd_cmd) le;
272 TAILQ_ENTRY(ahd_cmd) tqe;
273 } links;
274 uint32_t end;
275 };
276
277 struct ahd_cmd {
278 union {
279 struct ahd_cmd_internal icmd;
280 struct scsi_cmnd scsi_cmd;
281 } un;
282 };
283
284 #define acmd_icmd(cmd) ((cmd)->un.icmd)
285 #define acmd_scsi_cmd(cmd) ((cmd)->un.scsi_cmd)
286 #define acmd_links un.icmd.links
287
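/*
 * Editor's note: illustrative sketch (not part of the original header) of
 * how the overlay above lets a struct scsi_cmnd ride on the BSD queue
 * macros, e.g. to append a command to the completion queue declared
 * further below:
 *
 *	struct ahd_cmd *acmd = (struct ahd_cmd *)cmd;
 *
 *	TAILQ_INSERT_TAIL(&ahd->platform_data->completeq,
 *			  acmd, acmd_links.tqe);
 */
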
288 /*************************** Device Data Structures ***************************/
289 /*
290 * A per probed device structure used to deal with some error recovery
291 * scenarios that the Linux mid-layer code just doesn't know how to
292 * handle. The structure allocated for a device only becomes persistent
293 * after a successfully completed inquiry command to the target when
294 * that inquiry data indicates a lun is present.
295 */
296
297 typedef enum {
298 AHD_DEV_UNCONFIGURED = 0x01,
299 AHD_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
300 AHD_DEV_TIMER_ACTIVE = 0x04, /* Our timer is active */
301 AHD_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */
302 AHD_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */
303 AHD_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */
304 AHD_DEV_SLAVE_CONFIGURED = 0x80 /* slave_configure() has been called */
305 } ahd_linux_dev_flags;
306
307 struct ahd_linux_target;
308 struct ahd_linux_device {
309 TAILQ_ENTRY(ahd_linux_device) links;
310
311 /*
312 * The number of transactions currently
313 * queued to the device.
314 */
315 int active;
316
317 /*
318 * The currently allowed number of
319 * transactions that can be queued to
320 * the device. Must be signed for
321 * conversion from tagged to untagged
322 * mode where the device may have more
323 * than one outstanding active transaction.
324 */
325 int openings;
326
327 /*
328 * A positive count indicates that this
329 * device's queue is halted.
330 */
331 u_int qfrozen;
332
333 /*
334 * Cumulative command counter.
335 */
336 u_long commands_issued;
337
338 /*
339 * The number of tagged transactions when
340 * running at our current opening level
341 * that have been successfully received by
342 * this device since the last QUEUE FULL.
343 */
344 u_int tag_success_count;
345 #define AHD_TAG_SUCCESS_INTERVAL 50
346
347 ahd_linux_dev_flags flags;
348
349 /*
350 * Per device timer.
351 */
352 struct timer_list timer;
353
354 /*
355 * The high limit for the tags variable.
356 */
357 u_int maxtags;
358
359 /*
360 * The computed number of tags outstanding
361 * at the time of the last QUEUE FULL event.
362 */
363 u_int tags_on_last_queuefull;
364
365 /*
366 * How many times we have seen a queue full
367 * with the same number of tags. This is used
368 * to stop our adaptive queue depth algorithm
369 * on devices with a fixed number of tags.
370 */
371 u_int last_queuefull_same_count;
372 #define AHD_LOCK_TAGS_COUNT 50
373
374 /*
375 * How many transactions have been queued
376 * without the device going idle. We use
377 * this statistic to determine when to issue
378 * an ordered tag to prevent transaction
379 * starvation. This statistic is only updated
380 * if the AHD_DEV_PERIODIC_OTAG flag is set
381 * on this device.
382 */
383 u_int commands_since_idle_or_otag;
384 #define AHD_OTAG_THRESH 500
385
386 int lun;
387 Scsi_Device *scsi_device;
388 struct ahd_linux_target *target;
389 };
390
391 typedef enum {
392 AHD_DV_REQUIRED = 0x01,
393 AHD_INQ_VALID = 0x02,
394 AHD_BASIC_DV = 0x04,
395 AHD_ENHANCED_DV = 0x08
396 } ahd_linux_targ_flags;
397
398 /* DV States */
399 typedef enum {
400 AHD_DV_STATE_EXIT = 0,
401 AHD_DV_STATE_INQ_SHORT_ASYNC,
402 AHD_DV_STATE_INQ_ASYNC,
403 AHD_DV_STATE_INQ_ASYNC_VERIFY,
404 AHD_DV_STATE_TUR,
405 AHD_DV_STATE_REBD,
406 AHD_DV_STATE_INQ_VERIFY,
407 AHD_DV_STATE_WEB,
408 AHD_DV_STATE_REB,
409 AHD_DV_STATE_SU,
410 AHD_DV_STATE_BUSY
411 } ahd_dv_state;
412
413 struct ahd_linux_target {
414 struct ahd_linux_device *devices[AHD_NUM_LUNS];
415 int channel;
416 int target;
417 int refcount;
418 struct ahd_transinfo last_tinfo;
419 struct ahd_softc *ahd;
420 ahd_linux_targ_flags flags;
421 struct scsi_inquiry_data *inq_data;
422 /*
423 * The next "fallback" period to use for narrow/wide transfers.
424 */
425 uint8_t dv_next_narrow_period;
426 uint8_t dv_next_wide_period;
427 uint8_t dv_max_width;
428 uint8_t dv_max_ppr_options;
429 uint8_t dv_last_ppr_options;
430 u_int dv_echo_size;
431 ahd_dv_state dv_state;
432 u_int dv_state_retry;
433 uint8_t *dv_buffer;
434 uint8_t *dv_buffer1;
435
436 /*
437 * Cumulative counter of errors.
438 */
439 u_long errors_detected;
440 u_long cmds_since_error;
441 };
442
443 /********************* Definitions Required by the Core ***********************/
444 /*
445 * Number of SG segments we require. So long as the S/G segments for
446 * a particular transaction are allocated in a physically contiguous
447 * manner and are allocated below 4GB, the number of S/G segments is
448 * unrestricted.
449 */
450 #define AHD_NSEG 128
451
452 /*
453 * Per-SCB OSM storage.
454 */
455 typedef enum {
456 AHD_SCB_UP_EH_SEM = 0x1
457 } ahd_linux_scb_flags;
458
459 struct scb_platform_data {
460 struct ahd_linux_device *dev;
461 dma_addr_t buf_busaddr;
462 uint32_t xfer_len;
463 uint32_t sense_resid; /* Auto-Sense residual */
464 ahd_linux_scb_flags flags;
465 };
466
467 /*
468 * Define a structure used for each host adapter. All members are
469 * aligned on a boundary >= the size of the member to honor the
470 * alignment restrictions of the various platforms supported by
471 * this driver.
472 */
473 typedef enum {
474 AHD_DV_WAIT_SIMQ_EMPTY = 0x01,
475 AHD_DV_WAIT_SIMQ_RELEASE = 0x02,
476 AHD_DV_ACTIVE = 0x04,
477 AHD_DV_SHUTDOWN = 0x08,
478 AHD_RUN_CMPLT_Q_TIMER = 0x10
479 } ahd_linux_softc_flags;
480
481 TAILQ_HEAD(ahd_completeq, ahd_cmd);
482
483 struct ahd_platform_data {
484 /*
485 * Fields accessed from interrupt context.
486 */
487 struct ahd_linux_target *targets[AHD_NUM_TARGETS];
488 struct ahd_completeq completeq;
489
490 spinlock_t spin_lock;
491 u_int qfrozen;
492 pid_t dv_pid;
493 struct timer_list completeq_timer;
494 struct timer_list reset_timer;
495 struct timer_list stats_timer;
496 struct semaphore eh_sem;
497 struct semaphore dv_sem;
498 struct semaphore dv_cmd_sem; /* XXX This needs to be in
499 * the target struct
500 */
501 struct scsi_device *dv_scsi_dev;
502 struct Scsi_Host *host; /* pointer to scsi host */
503 #define AHD_LINUX_NOIRQ ((uint32_t)~0)
504 uint32_t irq; /* IRQ for this adapter */
505 uint32_t bios_address;
506 uint32_t mem_busaddr; /* Mem Base Addr */
507 uint64_t hw_dma_mask;
508 ahd_linux_softc_flags flags;
509 };
510
511 /************************** OS Utility Wrappers *******************************/
512 #define printf printk
513 #define M_NOWAIT GFP_ATOMIC
514 #define M_WAITOK 0
515 #define malloc(size, type, flags) kmalloc(size, flags)
516 #define free(ptr, type) kfree(ptr)
517
518 static __inline void ahd_delay(long);
519 static __inline void
520 ahd_delay(long usec)
521 {
522 /*
523 * udelay on Linux can have problems for
524 * multi-millisecond waits. Wait at most
525 * 1024us per call.
526 */
527 while (usec > 0) {
528 udelay(usec > 1024 ? 1024 : usec);
529 usec -= 1024;
530 }
531 }
532
533
534 /***************************** Low Level I/O **********************************/
535 static __inline uint8_t ahd_inb(struct ahd_softc * ahd, long port);
536 static __inline uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port);
537 static __inline void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
538 static __inline void ahd_outw_atomic(struct ahd_softc * ahd,
539 long port, uint16_t val);
540 static __inline void ahd_outsb(struct ahd_softc * ahd, long port,
541 uint8_t *, int count);
542 static __inline void ahd_insb(struct ahd_softc * ahd, long port,
543 uint8_t *, int count);
544
545 static __inline uint8_t
546 ahd_inb(struct ahd_softc * ahd, long port)
547 {
548 uint8_t x;
549
550 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
551 x = readb(ahd->bshs[0].maddr + port);
552 } else {
553 x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
554 }
555 mb();
556 return (x);
557 }
558
559 static __inline uint16_t
560 ahd_inw_atomic(struct ahd_softc * ahd, long port)
561 {
562 uint16_t x;
563
564 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
565 x = readw(ahd->bshs[0].maddr + port);
566 } else {
567 x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
568 }
569 mb();
570 return (x);
571 }
572
573 static __inline void
574 ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
575 {
576 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
577 writeb(val, ahd->bshs[0].maddr + port);
578 } else {
579 outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
580 }
581 mb();
582 }
583
584 static __inline void
585 ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
586 {
587 if (ahd->tags[0] == BUS_SPACE_MEMIO) {
588 writew(val, ahd->bshs[0].maddr + port);
589 } else {
590 outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
591 }
592 mb();
593 }
594
595 static __inline void
596 ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
597 {
598 int i;
599
600 /*
601 * There is probably a more efficient way to do this on Linux
602 * but we don't use this for anything speed critical and this
603 * should work.
604 */
605 for (i = 0; i < count; i++)
606 ahd_outb(ahd, port, *array++);
607 }
608
609 static __inline void
610 ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
611 {
612 int i;
613
614 /*
615 * There is probably a more efficient way to do this on Linux
616 * but we don't use this for anything speed critical and this
617 * should work.
618 */
619 for (i = 0; i < count; i++)
620 *array++ = ahd_inb(ahd, port);
621 }
622
623 /**************************** Initialization **********************************/
624 int ahd_linux_register_host(struct ahd_softc *,
625 Scsi_Host_Template *);
626
627 uint64_t ahd_linux_get_memsize(void);
628
629 /*************************** Pretty Printing **********************************/
630 struct info_str {
631 char *buffer;
632 int length;
633 off_t offset;
634 int pos;
635 };
636
637 void ahd_format_transinfo(struct info_str *info,
638 struct ahd_transinfo *tinfo);
639
640 /******************************** Locking *************************************/
641 /* Lock protecting internal data structures */
642 static __inline void ahd_lockinit(struct ahd_softc *);
643 static __inline void ahd_lock(struct ahd_softc *, unsigned long *flags);
644 static __inline void ahd_unlock(struct ahd_softc *, unsigned long *flags);
645
646 /* Lock acquisition and release of the above lock in midlayer entry points. */
647 static __inline void ahd_midlayer_entrypoint_lock(struct ahd_softc *,
648 unsigned long *flags);
649 static __inline void ahd_midlayer_entrypoint_unlock(struct ahd_softc *,
650 unsigned long *flags);
651
652 /* Lock held during command completion to the upper layer */
653 static __inline void ahd_done_lockinit(struct ahd_softc *);
654 static __inline void ahd_done_lock(struct ahd_softc *, unsigned long *flags);
655 static __inline void ahd_done_unlock(struct ahd_softc *, unsigned long *flags);
656
657 /* Lock held during ahd_list manipulation and ahd softc frees */
658 extern spinlock_t ahd_list_spinlock;
659 static __inline void ahd_list_lockinit(void);
660 static __inline void ahd_list_lock(unsigned long *flags);
661 static __inline void ahd_list_unlock(unsigned long *flags);
662
663 static __inline void
664 ahd_lockinit(struct ahd_softc *ahd)
665 {
666 spin_lock_init(&ahd->platform_data->spin_lock);
667 }
668
669 static __inline void
670 ahd_lock(struct ahd_softc *ahd, unsigned long *flags)
671 {
672 spin_lock_irqsave(&ahd->platform_data->spin_lock, *flags);
673 }
674
675 static __inline void
676 ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
677 {
678 spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags);
679 }
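
/*
 * Editor's note: illustrative locking pattern (sketch, not from this file)
 * for driver paths that are not midlayer entry points:
 *
 *	u_long flags;
 *
 *	ahd_lock(ahd, &flags);
 *	... touch state protected by platform_data->spin_lock ...
 *	ahd_unlock(ahd, &flags);
 */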
680
681 static __inline void
682 ahd_midlayer_entrypoint_lock(struct ahd_softc *ahd, unsigned long *flags)
683 {
684 /*
685 * In 2.5.X and some 2.4.X versions, the midlayer takes our
686 * lock just before calling us, so we avoid locking again.
687 * For other kernel versions, the io_request_lock is taken
688 * just before our entry point is called. In this case, we
689 * trade the io_request_lock for our per-softc lock.
690 */
691 #if AHD_SCSI_HAS_HOST_LOCK == 0
692 spin_unlock(&io_request_lock);
693 spin_lock(&ahd->platform_data->spin_lock);
694 #endif
695 }
696
697 static __inline void
698 ahd_midlayer_entrypoint_unlock(struct ahd_softc *ahd, unsigned long *flags)
699 {
700 #if AHD_SCSI_HAS_HOST_LOCK == 0
701 spin_unlock(&ahd->platform_data->spin_lock);
702 spin_lock(&io_request_lock);
703 #endif
704 }
705
706 static __inline void
707 ahd_done_lockinit(struct ahd_softc *ahd)
708 {
709 /*
710 * In 2.5.X, our own lock is held during completions.
711 * In previous versions, the io_request_lock is used.
712 * In either case, we can't initialize this lock again.
713 */
714 }
715
716 static __inline void
717 ahd_done_lock(struct ahd_softc *ahd, unsigned long *flags)
718 {
719 #if AHD_SCSI_HAS_HOST_LOCK == 0
720 spin_lock(&io_request_lock);
721 #endif
722 }
723
724 static __inline void
725 ahd_done_unlock(struct ahd_softc *ahd, unsigned long *flags)
726 {
727 #if AHD_SCSI_HAS_HOST_LOCK == 0
728 spin_unlock(&io_request_lock);
729 #endif
730 }
731
732 static __inline void
733 ahd_list_lockinit(void)
734 {
735 spin_lock_init(&ahd_list_spinlock);
736 }
737
738 static __inline void
739 ahd_list_lock(unsigned long *flags)
740 {
741 spin_lock_irqsave(&ahd_list_spinlock, *flags);
742 }
743
744 static __inline void
745 ahd_list_unlock(unsigned long *flags)
746 {
747 spin_unlock_irqrestore(&ahd_list_spinlock, *flags);
748 }
749
750 /******************************* PCI Definitions ******************************/
751 /*
752 * PCIM_xxx: mask to locate subfield in register
753 * PCIR_xxx: config register offset
754 * PCIC_xxx: device class
755 * PCIS_xxx: device subclass
756 * PCIP_xxx: device programming interface
757 * PCIV_xxx: PCI vendor ID (only required to fixup ancient devices)
758 * PCID_xxx: device ID
759 */
760 #define PCIR_DEVVENDOR 0x00
761 #define PCIR_VENDOR 0x00
762 #define PCIR_DEVICE 0x02
763 #define PCIR_COMMAND 0x04
764 #define PCIM_CMD_PORTEN 0x0001
765 #define PCIM_CMD_MEMEN 0x0002
766 #define PCIM_CMD_BUSMASTEREN 0x0004
767 #define PCIM_CMD_MWRICEN 0x0010
768 #define PCIM_CMD_PERRESPEN 0x0040
769 #define PCIM_CMD_SERRESPEN 0x0100
770 #define PCIR_STATUS 0x06
771 #define PCIR_REVID 0x08
772 #define PCIR_PROGIF 0x09
773 #define PCIR_SUBCLASS 0x0a
774 #define PCIR_CLASS 0x0b
775 #define PCIR_CACHELNSZ 0x0c
776 #define PCIR_LATTIMER 0x0d
777 #define PCIR_HEADERTYPE 0x0e
778 #define PCIM_MFDEV 0x80
779 #define PCIR_BIST 0x0f
780 #define PCIR_CAP_PTR 0x34
781
782 /* config registers for header type 0 devices */
783 #define PCIR_MAPS 0x10
784 #define PCIR_SUBVEND_0 0x2c
785 #define PCIR_SUBDEV_0 0x2e
786
787 /****************************** PCI-X definitions *****************************/
788 #define PCIXR_COMMAND 0x96
789 #define PCIXR_DEVADDR 0x98
790 #define PCIXM_DEVADDR_FNUM 0x0003 /* Function Number */
791 #define PCIXM_DEVADDR_DNUM 0x00F8 /* Device Number */
792 #define PCIXM_DEVADDR_BNUM 0xFF00 /* Bus Number */
793 #define PCIXR_STATUS 0x9A
794 #define PCIXM_STATUS_64BIT 0x0001 /* Active 64bit connection to device. */
795 #define PCIXM_STATUS_133CAP 0x0002 /* Device is 133MHz capable */
796 #define PCIXM_STATUS_SCDISC 0x0004 /* Split Completion Discarded */
797 #define PCIXM_STATUS_UNEXPSC 0x0008 /* Unexpected Split Completion */
798 #define PCIXM_STATUS_CMPLEXDEV 0x0010 /* Device Complexity (set == bridge) */
799 #define PCIXM_STATUS_MAXMRDBC 0x0060 /* Maximum Burst Read Count */
800 #define PCIXM_STATUS_MAXSPLITS 0x0380 /* Maximum Split Transactions */
801 #define PCIXM_STATUS_MAXCRDS 0x1C00 /* Maximum Cumulative Read Size */
802 #define PCIXM_STATUS_RCVDSCEM 0x2000 /* Received a Split Comp w/Error msg */
803
804 extern struct pci_driver aic79xx_pci_driver;
805
806 typedef enum
807 {
808 AHD_POWER_STATE_D0,
809 AHD_POWER_STATE_D1,
810 AHD_POWER_STATE_D2,
811 AHD_POWER_STATE_D3
812 } ahd_power_state;
813
814 void ahd_power_state_change(struct ahd_softc *ahd,
815 ahd_power_state new_state);
816
817 /******************************* PCI Routines *********************************/
818 int ahd_linux_pci_init(void);
819 void ahd_linux_pci_exit(void);
820 int ahd_pci_map_registers(struct ahd_softc *ahd);
821 int ahd_pci_map_int(struct ahd_softc *ahd);
822
823 static __inline uint32_t ahd_pci_read_config(ahd_dev_softc_t pci,
824 int reg, int width);
825
826 static __inline uint32_t
827 ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
828 {
829 switch (width) {
830 case 1:
831 {
832 uint8_t retval;
833
834 pci_read_config_byte(pci, reg, &retval);
835 return (retval);
836 }
837 case 2:
838 {
839 uint16_t retval;
840 pci_read_config_word(pci, reg, &retval);
841 return (retval);
842 }
843 case 4:
844 {
845 uint32_t retval;
846 pci_read_config_dword(pci, reg, &retval);
847 return (retval);
848 }
849 default:
850 panic("ahd_pci_read_config: Read size too big");
851 /* NOTREACHED */
852 return (0);
853 }
854 }
855
856 static __inline void ahd_pci_write_config(ahd_dev_softc_t pci,
857 int reg, uint32_t value,
858 int width);
859
860 static __inline void
861 ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
862 {
863 switch (width) {
864 case 1:
865 pci_write_config_byte(pci, reg, value);
866 break;
867 case 2:
868 pci_write_config_word(pci, reg, value);
869 break;
870 case 4:
871 pci_write_config_dword(pci, reg, value);
872 break;
873 default:
874 panic("ahd_pci_write_config: Write size too big");
875 /* NOTREACHED */
876 }
877 }
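
/*
 * Editor's note: illustrative read-modify-write of the PCI command
 * register using the wrappers above, where 'pci' is the adapter's
 * ahd_dev_softc_t handle (sketch only):
 *
 *	uint32_t cmd;
 *
 *	cmd = ahd_pci_read_config(pci, PCIR_COMMAND, 2);
 *	cmd |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN;
 *	ahd_pci_write_config(pci, PCIR_COMMAND, cmd, 2);
 */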
878
879 static __inline int ahd_get_pci_function(ahd_dev_softc_t);
880 static __inline int
881 ahd_get_pci_function(ahd_dev_softc_t pci)
882 {
883 return (PCI_FUNC(pci->devfn));
884 }
885
886 static __inline int ahd_get_pci_slot(ahd_dev_softc_t);
887 static __inline int
888 ahd_get_pci_slot(ahd_dev_softc_t pci)
889 {
890 return (PCI_SLOT(pci->devfn));
891 }
892
893 static __inline int ahd_get_pci_bus(ahd_dev_softc_t);
894 static __inline int
895 ahd_get_pci_bus(ahd_dev_softc_t pci)
896 {
897 return (pci->bus->number);
898 }
899
900 static __inline void ahd_flush_device_writes(struct ahd_softc *);
901 static __inline void
902 ahd_flush_device_writes(struct ahd_softc *ahd)
903 {
904 /* XXX Is this sufficient for all architectures??? */
905 ahd_inb(ahd, INTSTAT);
906 }
907
908 /**************************** Proc FS Support *********************************/
909 int ahd_linux_proc_info(struct Scsi_Host *, char *, char **,
910 off_t, int, int);
911
912 /*************************** Domain Validation ********************************/
913 #define AHD_DV_CMD(cmd) ((cmd)->scsi_done == ahd_linux_dv_complete)
914 #define AHD_DV_SIMQ_FROZEN(ahd) \
915 ((((ahd)->platform_data->flags & AHD_DV_ACTIVE) != 0) \
916 && (ahd)->platform_data->qfrozen == 1)
917
918 /*********************** Transaction Access Wrappers **************************/
919 static __inline void ahd_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t);
920 static __inline void ahd_set_transaction_status(struct scb *, uint32_t);
921 static __inline void ahd_cmd_set_scsi_status(Scsi_Cmnd *, uint32_t);
922 static __inline void ahd_set_scsi_status(struct scb *, uint32_t);
923 static __inline uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd);
924 static __inline uint32_t ahd_get_transaction_status(struct scb *);
925 static __inline uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd);
926 static __inline uint32_t ahd_get_scsi_status(struct scb *);
927 static __inline void ahd_set_transaction_tag(struct scb *, int, u_int);
928 static __inline u_long ahd_get_transfer_length(struct scb *);
929 static __inline int ahd_get_transfer_dir(struct scb *);
930 static __inline void ahd_set_residual(struct scb *, u_long);
931 static __inline void ahd_set_sense_residual(struct scb *scb, u_long resid);
932 static __inline u_long ahd_get_residual(struct scb *);
933 static __inline u_long ahd_get_sense_residual(struct scb *);
934 static __inline int ahd_perform_autosense(struct scb *);
935 static __inline uint32_t ahd_get_sense_bufsize(struct ahd_softc *,
936 struct scb *);
937 static __inline void ahd_notify_xfer_settings_change(struct ahd_softc *,
938 struct ahd_devinfo *);
939 static __inline void ahd_platform_scb_free(struct ahd_softc *ahd,
940 struct scb *scb);
941 static __inline void ahd_freeze_scb(struct scb *scb);
942
943 static __inline
944 void ahd_cmd_set_transaction_status(Scsi_Cmnd *cmd, uint32_t status)
945 {
946 cmd->result &= ~(CAM_STATUS_MASK << 16);
947 cmd->result |= status << 16;
948 }
949
950 static __inline
951 void ahd_set_transaction_status(struct scb *scb, uint32_t status)
952 {
953 ahd_cmd_set_transaction_status(scb->io_ctx,status);
954 }
955
956 static __inline
957 void ahd_cmd_set_scsi_status(Scsi_Cmnd *cmd, uint32_t status)
958 {
959 cmd->result &= ~0xFFFF;
960 cmd->result |= status;
961 }
962
963 static __inline
964 void ahd_set_scsi_status(struct scb *scb, uint32_t status)
965 {
966 ahd_cmd_set_scsi_status(scb->io_ctx, status);
967 }
968
969 static __inline
970 uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd)
971 {
972 return ((cmd->result >> 16) & CAM_STATUS_MASK);
973 }
974
975 static __inline
976 uint32_t ahd_get_transaction_status(struct scb *scb)
977 {
978 return (ahd_cmd_get_transaction_status(scb->io_ctx));
979 }
980
981 static __inline
982 uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd)
983 {
984 return (cmd->result & 0xFFFF);
985 }
986
987 static __inline
988 uint32_t ahd_get_scsi_status(struct scb *scb)
989 {
990 return (ahd_cmd_get_scsi_status(scb->io_ctx));
991 }
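
/*
 * Editor's note: the wrappers above assume the usual Linux cmd->result
 * packing: the CAM transaction status lives in bits 31:16 and the raw
 * SCSI status in bits 15:0, so a successfully completed command would be
 * reported with something like (sketch, constants as used elsewhere in
 * this driver):
 *
 *	ahd_cmd_set_transaction_status(cmd, CAM_REQ_CMP);
 *	ahd_cmd_set_scsi_status(cmd, SCSI_STATUS_OK);
 */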
992
993 static __inline
994 void ahd_set_transaction_tag(struct scb *scb, int enabled, u_int type)
995 {
996 /*
997 * Nothing to do for Linux as the incoming transaction
998 * has no concept of tagged vs. untagged issuance, etc.
999 */
1000 }
1001
1002 static __inline
1003 u_long ahd_get_transfer_length(struct scb *scb)
1004 {
1005 return (scb->platform_data->xfer_len);
1006 }
1007
1008 static __inline
1009 int ahd_get_transfer_dir(struct scb *scb)
1010 {
1011 return (scb->io_ctx->sc_data_direction);
1012 }
1013
1014 static __inline
1015 void ahd_set_residual(struct scb *scb, u_long resid)
1016 {
1017 scb->io_ctx->resid = resid;
1018 }
1019
1020 static __inline
1021 void ahd_set_sense_residual(struct scb *scb, u_long resid)
1022 {
1023 scb->platform_data->sense_resid = resid;
1024 }
1025
1026 static __inline
1027 u_long ahd_get_residual(struct scb *scb)
1028 {
1029 return (scb->io_ctx->resid);
1030 }
1031
1032 static __inline
1033 u_long ahd_get_sense_residual(struct scb *scb)
1034 {
1035 return (scb->platform_data->sense_resid);
1036 }
1037
1038 static __inline
1039 int ahd_perform_autosense(struct scb *scb)
1040 {
1041 /*
1042 * We always perform autosense in Linux.
1043 * On other platforms this is set on a
1044 * per-transaction basis.
1045 */
1046 return (1);
1047 }
1048
1049 static __inline uint32_t
1050 ahd_get_sense_bufsize(struct ahd_softc *ahd, struct scb *scb)
1051 {
1052 return (sizeof(struct scsi_sense_data));
1053 }
1054
1055 static __inline void
1056 ahd_notify_xfer_settings_change(struct ahd_softc *ahd,
1057 struct ahd_devinfo *devinfo)
1058 {
1059 /* Nothing to do here for linux */
1060 }
1061
1062 static __inline void
1063 ahd_platform_scb_free(struct ahd_softc *ahd, struct scb *scb)
1064 {
1065 ahd->flags &= ~AHD_RESOURCE_SHORTAGE;
1066 }
1067
1068 int ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg);
1069 void ahd_platform_free(struct ahd_softc *ahd);
1070 void ahd_platform_init(struct ahd_softc *ahd);
1071 void ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb);
1072 void ahd_freeze_simq(struct ahd_softc *ahd);
1073 void ahd_release_simq(struct ahd_softc *ahd);
1074
1075 static __inline void
1076 ahd_freeze_scb(struct scb *scb)
1077 {
1078 if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
1079 scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
1080 scb->platform_data->dev->qfrozen++;
1081 }
1082 }
1083
1084 void ahd_platform_set_tags(struct ahd_softc *ahd,
1085 struct ahd_devinfo *devinfo, ahd_queue_alg);
1086 int ahd_platform_abort_scbs(struct ahd_softc *ahd, int target,
1087 char channel, int lun, u_int tag,
1088 role_t role, uint32_t status);
1089 irqreturn_t
1090 ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs);
1091 void ahd_platform_flushwork(struct ahd_softc *ahd);
1092 int ahd_softc_comp(struct ahd_softc *, struct ahd_softc *);
1093 void ahd_done(struct ahd_softc*, struct scb*);
1094 void ahd_send_async(struct ahd_softc *, char channel,
1095 u_int target, u_int lun, ac_code, void *);
1096 void ahd_print_path(struct ahd_softc *, struct scb *);
1097
1098 #ifdef CONFIG_PCI
1099 #define AHD_PCI_CONFIG 1
1100 #else
1101 #define AHD_PCI_CONFIG 0
1102 #endif
1103 #define bootverbose aic79xx_verbose
1104 extern uint32_t aic79xx_verbose;
1105
1106 #endif /* _AIC79XX_LINUX_H_ */