/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(struct subchannel_id schid,
			       unsigned int out_mask, unsigned int in_mask)
{
	register unsigned long __fc asm ("0") = 2;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
{
	register unsigned long __fc asm ("0") = 1;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qperf_inc(q, eqbs_partial);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
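
/*
 * Worked example (illustrative, not from the original source): with
 * start = 20 and count = 10, if buffers 20..24 share one state but
 * buffer 25 is in a different state, qdio_do_eqbs() stops early and
 * returns 5, with *state holding the common state of the extracted
 * buffers.
 */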
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	WARN_ON(tmp_count);
	return count - tmp_count;
}
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	int cc;

	if (!need_siga_sync(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	cc = do_siga_sync(q->irq_ptr->schid, output, input);
	if (cc)
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}
static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}
static inline int qdio_siga_sync_out(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, 0);
}
static inline int qdio_siga_sync_all(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, ~0U);
}
static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid;
	unsigned int fc = 0;
	u64 start_time = 0;
	int cc;

	if (q->u.out.use_enh_siga)
		fc = 3;

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= 0x80;
	}
	else
		schid = *((u32 *)&q->irq_ptr->schid);

again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc);

	/* hipersocket busy condition */
	if (*busy_bit) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);

		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	return cc;
}
static inline int qdio_siga_input(struct qdio_q *q)
{
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	cc = do_siga_input(q->irq_ptr->schid, q->mask);
	if (cc)
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}
static inline void qdio_sync_after_thinint(struct qdio_q *q)
{
	if (pci_out_supported(q)) {
		if (need_siga_sync_thinint(q))
			qdio_siga_sync_all(q);
		else if (need_siga_sync_out_thinint(q))
			qdio_siga_sync_out(q);
	} else
		qdio_siga_sync_q(q);
}
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos = 0;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	while (count >>= 1)
		pos++;
	q->q_stats.nr_sbals[pos]++;
}
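
/*
 * Worked example (illustrative, not from the original source): the shift
 * loop above computes floor(log2(count)), so count = 24 increments
 * nr_sbals[4] (2^4 <= 24 < 2^5), while the full-ring case of
 * count == QDIO_MAX_BUFFERS_MASK (127) is counted in nr_sbals[7].
 */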
static void announce_buffer_error(struct qdio_q *q, int count)
{
	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].flags & 0xff,
		  q->sbal[q->first_to_check]->element[15].flags & 0xff);
}
static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}
static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_clock();
		return 1;
	} else
		return 0;
}
static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}
static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}
void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
		qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}
/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}
static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}
static int qdio_kick_outbound_q(struct qdio_q *q)
{
	unsigned int busy_bit;
	int cc;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	return cc;
}
static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}
/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}
void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}
static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	qdio_sync_after_thinint(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}
void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}
static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else
			tasklet_schedule(&q->tasklet);
	}

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;

		if (!siga_syncs_out_pci(q))
			qdio_siga_sync_q(q);

		tasklet_schedule(&q->tasklet);
	}
}
static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
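
/*
 * Usage sketch (not from the original source; "my_cdev" is hypothetical):
 *
 *	struct qdio_ssqd_desc ssqd;
 *
 *	if (qdio_get_ssqd_desc(my_cdev, &ssqd) == 0)
 *		pr_info("qfmt: %d\n", ssqd.qfmt);
 */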
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
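
/*
 * Usage sketch (not from the original source; "my_cdev" is hypothetical):
 * drivers typically shut the subchannel down before releasing its memory:
 *
 *	qdio_shutdown(my_cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	qdio_free(my_cdev);
 */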
/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
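
/*
 * Lifecycle sketch (not from the original source): a driver brings a
 * subchannel up by calling, in order, qdio_allocate(), qdio_establish()
 * and qdio_activate(), and tears it down with qdio_shutdown() followed
 * by qdio_free(). "init_data" below is a hypothetical, driver-filled
 * struct qdio_initialize:
 *
 *	if (qdio_allocate(&init_data) == 0 &&
 *	    qdio_establish(&init_data) == 0)
 *		rc = qdio_activate(init_data.cdev);
 */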
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
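
/*
 * Worked example (illustrative, not from the original source): with
 * start = 120 and count = 16, add_buf() wraps and end becomes 8, so the
 * covered range is buffers 120..127 and 0..7; buf_in_between(2, 120, 16)
 * returns 1 via the wrap-around case.
 */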
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		}
		else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return 0;

	if (need_siga_in(q))
		return qdio_siga_input(q);
	return 0;
}
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	}
	else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		if (multicast_outbound(q))
			rc = qdio_kick_outbound_q(q);
		else
			if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
			    (count > 1) &&
			    (count <= q->irq_ptr->ssqd_desc.mmwc)) {
				/* exploit enhanced SIGA */
				q->u.out.use_enh_siga = 1;
				rc = qdio_kick_outbound_q(q);
			} else {
				/*
				 * One siga-w per buffer required for unicast
				 * HiperSockets.
				 */
				q->u.out.use_enh_siga = 0;
				while (count--) {
					rc = qdio_kick_outbound_q(q);
					if (rc)
						goto out;
				}
			}
		goto out;
	}

	if (need_siga_sync(q)) {
		qdio_siga_sync_q(q);
		goto out;
	}

	/* try to fast requeue buffers */
	get_buf_state(q, prev_buf(bufnr), &state, 0);
	if (state != SLSB_CU_OUTPUT_PRIMED)
		rc = qdio_kick_outbound_q(q);
	else
		qperf_inc(q, fast_requeue);

out:
	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}
/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
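
/*
 * Usage sketch (not from the original source; "my_cdev" and the buffer
 * indexes are hypothetical): after refilling 16 empty SBALs of input
 * queue 0 starting at buffer 0, a driver hands them back like this:
 *
 *	rc = do_QDIO(my_cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, 16);
 *	if (rc)
 *		pr_err("do_QDIO failed: %d\n", rc);
 */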
/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	WARN_ON(queue_irqs_enabled(q));

	if (!shared_ind(q->irq_ptr->dsci))
		xchg(q->irq_ptr->dsci, 0);

	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;

}
EXPORT_SYMBOL(qdio_start_irq);
/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	WARN_ON(queue_irqs_enabled(q));

	qdio_sync_after_thinint(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);
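
/*
 * Polling-mode sketch (not from the original source; "my_cdev" and
 * my_process_buffers() are hypothetical): with interrupts disabled via
 * qdio_stop_irq(), a driver drains input queue 0 and re-enables
 * interrupts once no more work is found:
 *
 *	int start, error, n;
 *
 *	do {
 *		n = qdio_get_next_buffers(my_cdev, 0, &start, &error);
 *		if (n > 0)
 *			my_process_buffers(start, n, error);
 *	} while (n > 0);
 *	if (qdio_start_irq(my_cdev, 0) == 1)
 *		goto poll_again;	(new data arrived meanwhile)
 */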
/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_setup_init();
	if (rc)
		return rc;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = qdio_debug_init();
	if (rc)
		goto out_ti;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_debug;
	return 0;

out_debug:
	qdio_debug_exit();
out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
	return rc;
}
static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_debug_exit();
	qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);