[S390] qdio: improve inbound buffer acknowledgement
drivers/s390/cio/qdio_main.c
/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	      "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
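
/*
 * The do_siga_* wrappers below issue the SIGA instruction: the function
 * code in register 0 selects the variant (2 = SIGA-sync, 1 = SIGA-read;
 * do_siga_output takes its function code as a parameter), and register 1
 * carries the subchannel id or, with QEBSM, the subchannel token. The
 * condition code is extracted from the PSW via ipm/srl.
 */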
static inline int do_siga_sync(struct subchannel_id schid,
			       unsigned int out_mask, unsigned int in_mask)
{
	register unsigned long __fc asm ("0") = 2;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
{
	register unsigned long __fc asm ("0") = 1;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 u32 *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
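	/*
	 * If SIGA raises an access exception, the exception table entry
	 * above resumes execution past the cc extraction, so cc keeps its
	 * preset QDIO_ERROR_SIGA_ACCESS_EXCEPTION value. The busy bit is
	 * returned in the leftmost bit of the 32-bit function-code register
	 * and is extracted below.
	 */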
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}
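
/*
 * CCQ values as implied by the checks in qdio_check_ccq(): 0 and 32 mean
 * the request completed or the next buffer has a different state; 96 and
 * 97 mean not all buffers were processed, so the caller should reissue
 * the instruction for the remainder.
 */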

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	WARN_ON(tmp_count);
	return count - tmp_count;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
			 unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}
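
/*
 * Illustrative sketch, not part of the original driver: next_buf(),
 * prev_buf() and add_buf() from qdio.h are assumed to implement modulo
 * arithmetic over the 128-entry SLSB ring. An equivalent stand-alone
 * helper would look like this:
 */
static inline int qdio_example_add_buf(int bufnr, int count)
{
	/* QDIO_MAX_BUFFERS_MASK is assumed to be QDIO_MAX_BUFFERS_PER_Q - 1,
	 * so the addition wraps around at the end of the ring */
	return (bufnr + count) & QDIO_MAX_BUFFERS_MASK;
}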

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
			  unsigned int input)
{
	int cc;

	if (!need_siga_sync(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:");
	DBF_DEV_HEX(DBF_INFO, q->irq_ptr, q, sizeof(void *));
	qdio_perf_stat_inc(&perf_stats.siga_sync);

	cc = do_siga_sync(q->irq_ptr->schid, output, input);
	if (cc)
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}

inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static inline int qdio_siga_sync_out(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, 0);
}

static inline int qdio_siga_sync_all(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, ~0U);
}

static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned int fc = 0;
	unsigned long schid;

	if (q->u.out.use_enh_siga)
		fc = 3;

	if (!is_qebsm(q))
		schid = *((u32 *)&q->irq_ptr->schid);
	else {
		schid = q->irq_ptr->sch_token;
		fc |= 0x80;
	}
	return do_siga_output(schid, q->mask, busy_bit, fc);
}
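
/*
 * Function-code bits used by qdio_do_siga_output() above: 3 selects the
 * enhanced-SIGA variant, and 0x80 is or-ed in when register 1 holds a
 * QEBSM subchannel token instead of the subchannel id.
 */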

static int qdio_siga_output(struct qdio_q *q)
{
	int cc;
	u32 busy_bit;
	u64 start_time = 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_out);
again:
	cc = qdio_do_siga_output(q, &busy_bit);
	if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w bb:%2d", q->nr);

		if (!start_time)
			start_time = get_usecs();
		else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}

	if (cc == 2 && busy_bit)
		cc |= QDIO_ERROR_SIGA_BUSY;
	if (cc)
		DBF_ERROR("%4x SIGA-W:%2d", SCH_NO(q), cc);
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_in);

	cc = do_siga_input(q->irq_ptr->schid, q->mask);
	if (cc)
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}

/* called from thinint inbound handler */
void qdio_sync_after_thinint(struct qdio_q *q)
{
	if (pci_out_supported(q)) {
		if (need_siga_sync_thinint(q))
			qdio_siga_sync_all(q);
		else if (need_siga_sync_out_thinint(q))
			qdio_siga_sync_out(q);
	} else
		qdio_siga_sync_q(q);
}

inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qdio_perf_stat_inc(&perf_stats.debug_stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
}

static void announce_buffer_error(struct qdio_q *q, int count)
{
	q->qdio_error = QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
		qdio_perf_stat_inc(&perf_stats.outbound_target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].flags & 0xff,
		  q->sbal[q->first_to_check]->element[15].flags & 0xff);
}
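
/*
 * Mark inbound buffers that were found PRIMED as acknowledged. With QEBSM
 * the ACK was already set by EQBS and only the bookkeeping is updated;
 * without QEBSM the newest buffer is ACKed by hand and the remaining
 * PRIMED buffers are reset to NOT_INIT so that initiative is not lost in
 * the thinint code.
 */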
static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->last_move_ftc = q->first_to_check;
			return;
		}

		/* delete the previous ACKs */
		set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->last_move_ftc = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, q->first_to_check, SLSB_P_INPUT_ACK);
	}

	q->last_move_ftc = new;
	count--;
	if (!count)
		return;

	/*
	 * Need to change all PRIMED buffers to NOT_INIT, otherwise
	 * we're losing initiative in the thinint code.
	 */
	set_buf_states(q, next_buf(q->first_to_check), SLSB_P_INPUT_NOT_INIT,
		       count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/*
	 * No siga-sync here: either a PCI interrupt or our processing
	 * after a thin interrupt will sync the queues.
	 */

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		/*
		 * No siga-sync needed for non-qebsm here, as the inbound
		 * queue will be synced on the next siga-r, or
		 * tiqdio_is_inbound_q_done will do the siga-sync.
		 */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		goto check_next;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}

int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move_ftc) || q->qdio_error) {
		if (!need_siga_sync(q) && !pci_out_supported(q))
			q->u.in.timestamp = get_usecs();

		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
		return 1;
	} else
		return 0;
}

static int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	/*
	 * We need that one for synchronization with the adapter, as it
	 * does a kind of PCI avoidance.
	 */
	qdio_siga_sync_q(q);

	get_buf_state(q, q->first_to_check, &state, 0);
	if (state == SLSB_P_INPUT_PRIMED)
		/* we got something to do */
		return 0;

	/* on VM, we don't poll, so the q is always done here */
	if (need_siga_sync(q) || pci_out_supported(q))
		return 1;

	/*
	 * At this point we know that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
			      q->first_to_check);
		return 1;
	} else {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
			      q->first_to_check);
		return 0;
	}
}

void qdio_kick_inbound_handler(struct qdio_q *q)
{
	int count, start, end;

	qdio_perf_stat_inc(&perf_stats.inbound_handler);

	start = q->first_to_kick;
	end = q->first_to_check;
	if (end >= start)
		count = end - start;
	else
		count = end + QDIO_MAX_BUFFERS_PER_Q - start;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
		   start, count, q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = q->first_to_check;
	q->qdio_error = 0;
}
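
/*
 * Inbound tasklet core: process newly arrived buffers and call the
 * driver's handler until the queue is done, then stop polling and check
 * once more so that initiative is not lost between resetting the ACK
 * state and the final check.
 */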
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
again:
	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_inbound_handler(q);

	if (!qdio_inbound_q_done(q))
		/* means poll time is not yet over */
		goto again;

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q))
		goto again;
}

/* inbound tasklet */
void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
		qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		/*
		 * We fetch all buffer states at once. get_buf_states may
		 * return count < stop. For QEBSM we do not loop.
		 */
		if (is_qebsm(q))
			break;
		goto check_next;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move_ftc) || q->qdio_error) {
		q->last_move_ftc = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

/*
 * VM may present us cc=2 and the busy bit set on SIGA-write
 * during reconfiguration of its Guest LAN (only in iqdio mode;
 * otherwise qdio is asynchronous and cc=2 with busy bit there would take
 * the queues down immediately).
 *
 * Therefore qdio_siga_output will retry continuously for a short time
 * if such a condition occurs. If it does not change, it will
 * increase the busy_siga_counter, save the timestamp, and
 * schedule the queue for later processing. qdio_outbound_processing
 * will check the counter; if non-zero, it will call qdio_kick_outbound_q
 * as often as the value of the counter. This will attempt further SIGA
 * instructions. For each successful SIGA, the counter is
 * decreased; for failing SIGAs the counter remains unchanged.
 * After some time of no movement, qdio_kick_outbound_q will
 * finally fail and reflect corresponding error codes to call
 * the upper layer module and have it take the queues down.
 *
 * Note that this is a change from the original HiperSockets design
 * (which said cc=2 with busy bit means take the queues down), but in
 * those days Guest LAN did not exist... excessive cc=2 with busy bit
 * conditions will still take the queues down, but the threshold is
 * higher due to the Guest LAN environment.
 *
 * Called from outbound tasklet and do_QDIO handler.
 */
static void qdio_kick_outbound_q(struct qdio_q *q)
{
	int rc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickoutq:%1d", q->nr);

	if (!need_siga_out(q))
		return;

	rc = qdio_siga_output(q);
	switch (rc) {
	case 0:
		/* TODO: improve error handling for CC=0 case */
		if (q->u.out.timestamp)
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "cc2 rslv:%4x",
				      atomic_read(&q->u.out.busy_siga_counter));
		/* went smooth this time, reset timestamp */
		q->u.out.timestamp = 0;
		break;
	/* cc=2 and busy bit */
	case (2 | QDIO_ERROR_SIGA_BUSY):
		atomic_inc(&q->u.out.busy_siga_counter);

		/* if the last siga was successful, save timestamp here */
		if (!q->u.out.timestamp)
			q->u.out.timestamp = get_usecs();

		/* if we're in time, don't touch qdio_error */
		if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) {
			tasklet_schedule(&q->tasklet);
			break;
		}
		DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
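		/* fall through: a busy condition that exceeded
		 * QDIO_BUSY_BIT_GIVE_UP is treated as a plain error */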
	default:
		/* for plain cc=1, 2 or 3 */
		q->qdio_error = rc;
	}
}

static void qdio_kick_outbound_handler(struct qdio_q *q)
{
	int start, end, count;

	start = q->first_to_kick;
	end = q->last_move_ftc;
	if (end >= start)
		count = end - start;
	else
		count = end + QDIO_MAX_BUFFERS_PER_Q - start;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr);
	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time: */
	q->first_to_kick = q->last_move_ftc;
	q->qdio_error = 0;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	int siga_attempts;

	qdio_perf_stat_inc(&perf_stats.tasklet_outbound);

	/* see comment in qdio_kick_outbound_q */
	siga_attempts = atomic_read(&q->u.out.busy_siga_counter);
	while (siga_attempts--) {
		atomic_dec(&q->u.out.busy_siga_counter);
		qdio_kick_outbound_q(q);
	}

	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_outbound_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT) {
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			tasklet_schedule(&q->tasklet);
		return;
	}

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) {
		tasklet_schedule(&q->tasklet);
		return;
	}

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else {
		if (!timer_pending(&q->u.out.timer)) {
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
			qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
		}
	}
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	tasklet_schedule(&q->tasklet);
}

/* called from thinint inbound tasklet */
void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	qdio_perf_stat_inc(&perf_stats.pci_int);

	for_each_input_queue(irq_ptr, q, i)
		tasklet_schedule(&q->tasklet);

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;

		if (!siga_syncs_out_pci(q))
			qdio_siga_sync_q(q);

		tasklet_schedule(&q->tasklet);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				       unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}

static void qdio_call_shutdown(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	put_device(&cdev->dev);
}

static void qdio_int_error(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
		if (get_device(&cdev->dev)) {
			/* Can't call shutdown from interrupt context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     qdio_call_shutdown);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}

static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
				       int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
		DBF_ERROR("EQ:ck con");
		goto error;
	}

	if (!(dstat & DEV_STAT_DEV_END)) {
		DBF_ERROR("EQ:no dev");
		goto error;
	}

	if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
		DBF_ERROR("EQ: bad io");
		goto error;
	}
	return 0;
error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
	return 1;
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
	if (!qdio_establish_check_errors(cdev, cstat, dstat))
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	qdio_perf_stat_inc(&perf_stats.qdio_int);

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			return;
		case -ETIMEDOUT:
			DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no);
			qdio_int_error(cdev);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;

	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;

	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			/* no state change so no need to wake up wait_q */
			return;
		}
		if ((cstat & ~SCHN_STAT_PCI) || dstat) {
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
			break;
		}
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

/**
 * qdio_cleanup - shutdown queues and free data structures
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * This function calls qdio_shutdown() for @cdev with method @how
 * and on success qdio_free() for @cdev.
 */
int qdio_cleanup(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	rc = qdio_shutdown(cdev, how);
	if (rc == 0)
		rc = qdio_free(cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_cleanup);

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_disable(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		tasklet_disable(&q->tasklet);
		del_timer(&q->u.out.timer);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_initialize - allocate and establish queues for a qdio subchannel
 * @init_data: initialization data
 *
 * This function first allocates queues via qdio_allocate() and on success
 * establishes them via qdio_establish().
 */
int qdio_initialize(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;

	rc = qdio_establish(init_data);
	if (rc)
		qdio_free(init_data->cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_initialize);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
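
/*
 * Illustrative sketch, not part of the original driver: the typical
 * bring-up sequence a device driver might use with the API above. The
 * example_* name is hypothetical; init_data is assumed to be filled in
 * by the caller as required by qdio_allocate(), with init_data->cdev
 * pointing to the same ccw device.
 */
static inline int example_qdio_start(struct ccw_device *cdev,
				     struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_initialize(init_data);	/* qdio_allocate + qdio_establish */
	if (rc)
		return rc;
	rc = qdio_activate(cdev);		/* start queue processing */
	if (rc)
		qdio_cleanup(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	return rc;
}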

static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static void handle_inbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	int used, rc, diff;

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->last_move_ftc, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update last_move_ftc */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->last_move_ftc);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				/* TODO: must we set last_move_ftc to something meaningful? */
				goto set;
			}
			q->last_move_ftc = add_buf(q->last_move_ftc, diff);
		} else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return;

	if (need_siga_in(q)) {
		rc = qdio_siga_input(q);
		if (rc)
			q->qdio_error = rc;
	}
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static void handle_outbound(struct qdio_q *q, unsigned int callflags,
			    int bufnr, int count)
{
	unsigned char state;
	int used;

	qdio_perf_stat_inc(&perf_stats.outbound_handler);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (callflags & QDIO_FLAG_PCI_OUT)
		q->u.out.pci_out_enabled = 1;
	else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		if (multicast_outbound(q))
			qdio_kick_outbound_q(q);
		else
			if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
			    (count > 1) &&
			    (count <= q->irq_ptr->ssqd_desc.mmwc)) {
				/* exploit enhanced SIGA */
				q->u.out.use_enh_siga = 1;
				qdio_kick_outbound_q(q);
			} else {
				/*
				 * One siga-w per buffer required for unicast
				 * HiperSockets.
				 */
				q->u.out.use_enh_siga = 0;
				while (count--)
					qdio_kick_outbound_q(q);
			}
		goto out;
	}

	if (need_siga_sync(q)) {
		qdio_siga_sync_q(q);
		goto out;
	}

	/* try to fast requeue buffers */
	get_buf_state(q, prev_buf(bufnr), &state, 0);
	if (state != SLSB_CU_OUTPUT_PRIMED)
		qdio_kick_outbound_q(q);
	else {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
		qdio_perf_stat_inc(&perf_stats.fast_requeue);
	}
out:
	/* Fixme: could wait forever if called from process context */
	tasklet_schedule(&q->tasklet);
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, int bufnr, int count)
{
	struct qdio_irq *irq_ptr;

	if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
	    (count > QDIO_MAX_BUFFERS_PER_Q) ||
	    (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if (!count)
		return 0;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input");
	else
		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output");
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags);
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr,
			       count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr,
				count);
	else
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(do_QDIO);
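
/*
 * Illustrative sketch, not part of the original driver: how a driver
 * might hand a range of emptied inbound buffers back to the adapter.
 * The helper name and the use of queue 0 are hypothetical.
 */
static inline int example_return_inbound(struct ccw_device *cdev,
					 int bufnr, int count)
{
	/* QDIO_FLAG_SYNC_INPUT routes to handle_inbound(), which resets the
	 * buffers to SLSB_CU_INPUT_EMPTY and issues SIGA-r if needed */
	return do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
}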

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_setup_init();
	if (rc)
		return rc;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = qdio_debug_init();
	if (rc)
		goto out_ti;
	rc = qdio_setup_perf_stats();
	if (rc)
		goto out_debug;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_perf;
	return 0;

out_perf:
	qdio_remove_perf_stats();
out_debug:
	qdio_debug_exit();
out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_remove_perf_stats();
	qdio_debug_exit();
	qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);