/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corporation 2002, 2008
 */

#include "zfcp_ext.h"

/* FIXME(tune): free space should be one max. SBAL chain plus what? */
#define ZFCP_QDIO_PCI_INTERVAL	(QDIO_MAX_BUFFERS_PER_Q \
				- (FSF_MAX_SBALS_PER_REQ + 4))
#define QBUFF_PER_PAGE		(PAGE_SIZE / sizeof(struct qdio_buffer))
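
/*
 * Allocate the queue's qdio_buffer structures page-wise: every
 * QBUFF_PER_PAGE-th slot gets a fresh zeroed page, and the slots in
 * between point at consecutive buffers within that page.
 */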
static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
{
	int pos;

	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
		sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
		if (!sbal[pos])
			return -ENOMEM;
	}
	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
		if (pos % QBUFF_PER_PAGE)
			sbal[pos] = sbal[pos - 1] + 1;

	return 0;
}
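
/* Return the SBALE at sbale_idx within the SBAL at sbal_idx of queue q. */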
static struct qdio_buffer_element *
zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
{
	return &q->sbal[sbal_idx]->element[sbale_idx];
}

/**
 * zfcp_qdio_free - free memory used by request- and response queue
 * @adapter: pointer to the zfcp_adapter structure
 */
void zfcp_qdio_free(struct zfcp_adapter *adapter)
{
	struct qdio_buffer **sbal_req, **sbal_resp;
	int p;

	if (adapter->ccw_device)
		qdio_free(adapter->ccw_device);

	sbal_req = adapter->req_q.sbal;
	sbal_resp = adapter->resp_q.sbal;

	for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
		free_page((unsigned long) sbal_req[p]);
		free_page((unsigned long) sbal_resp[p]);
	}
}
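
/* Warn about the QDIO problem and schedule an adapter reopen through ERP. */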
static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id)
{
	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
}
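
/*
 * Clear cnt SBALs starting at index first, wrapping around the
 * circular queue of QDIO_MAX_BUFFERS_PER_Q buffers.
 */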
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first,
				 int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}
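
/*
 * Output queue interrupt handler: outbound SBALs in the range
 * [first, first + count) have been transferred to the adapter and
 * can be recycled for new requests.
 */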
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int first, int count,
			      unsigned long parm)
{
	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
	struct zfcp_qdio_queue *queue = &adapter->req_q;

	if (unlikely(qdio_err)) {
		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
		zfcp_qdio_handler_error(adapter, 140);
		return;
	}

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(queue->sbal, first, count);

	atomic_add(count, &queue->count);
	wake_up(&adapter->request_wq);
}
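
/*
 * Look up the pending fsf_req for a completed request id, remove it
 * from the request list and complete it; an id that is not on the
 * list indicates corruption and is treated as fatal.
 */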
static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
				  unsigned long req_id, int sbal_idx)
{
	struct zfcp_fsf_req *fsf_req;
	unsigned long flags;

	spin_lock_irqsave(&adapter->req_list_lock, flags);
	fsf_req = zfcp_reqlist_find(adapter, req_id);

	if (!fsf_req)
		/*
		 * Unknown request means that we have potentially memory
		 * corruption and must stop the machine immediately.
		 */
		panic("error: unknown request id (%lx) on adapter %s.\n",
		      req_id, zfcp_get_busid_by_adapter(adapter));

	zfcp_reqlist_remove(adapter, fsf_req);
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);

	fsf_req->sbal_response = sbal_idx;
	fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count);
	zfcp_fsf_req_complete(fsf_req);
}
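
/*
 * Hand the processed inbound SBALs back to QDIO so the adapter can
 * reuse them; on failure the buffers stay recorded in queue->count
 * so that a later call can retry the do_QDIO.
 */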
static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
{
	struct zfcp_qdio_queue *queue = &adapter->resp_q;
	struct ccw_device *cdev = adapter->ccw_device;
	u8 count, start = queue->first;
	unsigned int retval;

	count = atomic_read(&queue->count) + processed;

	retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);

	if (unlikely(retval)) {
		atomic_set(&queue->count, count);
		/* FIXME: Recover this with an adapter reopen? */
	} else {
		queue->first += count;
		queue->first %= QDIO_MAX_BUFFERS_PER_Q;
		atomic_set(&queue->count, 0);
	}
}
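
/*
 * Input queue interrupt handler: walk every returned SBAL and, within
 * each, every SBALE up to the one flagged as last entry; each SBALE
 * carries the request id of a completed FSF request.
 */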
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int first, int count,
			       unsigned long parm)
{
	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
	struct zfcp_qdio_queue *queue = &adapter->resp_q;
	struct qdio_buffer_element *sbale;
	int sbal_idx, sbale_idx, sbal_no;

	if (unlikely(qdio_err)) {
		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
		zfcp_qdio_handler_error(adapter, 147);
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;

		/* go through all SBALEs of SBAL */
		for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER;
		     sbale_idx++) {
			sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx);
			zfcp_qdio_reqid_check(adapter,
					      (unsigned long) sbale->addr,
					      sbal_idx);
			if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
				break;
		}

		if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
			dev_warn(&adapter->ccw_device->dev,
				 "A QDIO protocol error occurred, "
				 "operations continue\n");
	}

	/*
	 * put range of SBALs back to response queue
	 * (including SBALs which have already been freed before)
	 */
	zfcp_qdio_resp_put_back(adapter, count);
}

/**
 * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
 * @fsf_req: pointer to struct fsf_req
 * Returns: pointer to qdio_buffer_element (SBALE) structure
 */
struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
{
	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
}

/**
 * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
 * @fsf_req: pointer to struct fsf_req
 * Returns: pointer to qdio_buffer_element (SBALE) structure
 */
struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
{
	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
			       req->sbale_curr);
}
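
/*
 * Cap the request at max_sbals or the number of free SBALs in the
 * request queue, whichever is smaller, and remember the last SBAL
 * index this request may use.
 */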
static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
{
	int count = atomic_read(&fsf_req->adapter->req_q.count);
	count = min(count, max_sbals);
	fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1)
			      % QDIO_MAX_BUFFERS_PER_Q;
}
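
/*
 * Close the current SBAL and advance the request to the next one,
 * or return NULL when the SBAL limit has been reached.
 */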
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (fsf_req->sbal_last == fsf_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(fsf_req);
	sbale->flags |= SBAL_FLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	fsf_req->sbal_last++;
	fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	fsf_req->sbal_number++;

	/* start at first SBALE of new SBAL */
	fsf_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= sbtype;

	return sbale;
}
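
/*
 * Return the next free SBALE for this request, chaining into a new
 * SBAL if the current one is full.
 */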
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
	if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
		return zfcp_qdio_sbal_chain(fsf_req, sbtype);

	fsf_req->sbale_curr++;

	return zfcp_qdio_sbale_curr(fsf_req);
}
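
/*
 * Roll back a partially built request by zeroing every SBAL between
 * its first and last index, inclusive.
 */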
static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req)
{
	struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal;
	int first = fsf_req->sbal_first;
	int last = fsf_req->sbal_last;
	int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
		QDIO_MAX_BUFFERS_PER_Q + 1;
	zfcp_qdio_zero_sbals(sbal, first, count);
}
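
/*
 * Map one contiguous data segment into SBALEs, splitting it at page
 * boundaries; returns -EINVAL and undoes the request when the queue
 * runs out of SBALs.
 */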
static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
				unsigned int sbtype, void *start_addr,
				unsigned int total_length)
{
	struct qdio_buffer_element *sbale;
	unsigned long remaining, length;
	void *addr;

	/* split segment up */
	for (addr = start_addr, remaining = total_length; remaining > 0;
	     addr += length, remaining -= length) {
		sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
		if (!sbale) {
			atomic_inc(&fsf_req->adapter->qdio_outb_full);
			zfcp_qdio_undo_sbals(fsf_req);
			return -EINVAL;
		}

		/* new piece must not exceed next page boundary */
		length = min(remaining,
			     (PAGE_SIZE - ((unsigned long)addr &
					   (PAGE_SIZE - 1))));
		sbale->addr = addr;
		sbale->length = length;
	}
	return 0;
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @fsf_req: request to be processed
 * @sbtype: SBALE flags
 * @sg: scatter-gather list
 * @max_sbals: upper bound for number of SBALs to be used
 * Returns: number of bytes, or error (negative)
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
			    struct scatterlist *sg, int max_sbals)
{
	struct qdio_buffer_element *sbale;
	int retval, bytes = 0;

	/* figure out last allowed SBAL */
	zfcp_qdio_sbal_limit(fsf_req, max_sbals);

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(fsf_req);
	sbale->flags |= sbtype;

	for (; sg; sg = sg_next(sg)) {
		retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg),
					      sg->length);
		if (retval < 0)
			return retval;
		bytes += sg->length;
	}

	/* assume that no other SBALEs are to follow in the same SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	return bytes;
}

/**
 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
 * @fsf_req: pointer to struct zfcp_fsf_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
{
	struct zfcp_adapter *adapter = fsf_req->adapter;
	struct zfcp_qdio_queue *req_q = &adapter->req_q;
	int first = fsf_req->sbal_first;
	int count = fsf_req->sbal_number;
	int retval, pci, pci_batch;
	struct qdio_buffer_element *sbale;

	/* acknowledgements for transferred buffers */
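	/*
	 * Request a PCI interrupt roughly every ZFCP_QDIO_PCI_INTERVAL
	 * outbound buffers: req_q_pci_batch counts buffers sent since the
	 * last PCI-flagged SBAL, and once the interval is crossed the SBAL
	 * at the interval boundary gets SBAL_FLAGS0_PCI set.
	 */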
	pci_batch = adapter->req_q_pci_batch + count;
	if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
		pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
		pci = first + count - (pci_batch + 1);
		pci %= QDIO_MAX_BUFFERS_PER_Q;
		sbale = zfcp_qdio_sbale(req_q, pci, 0);
		sbale->flags |= SBAL_FLAGS0_PCI;
	}

	retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
			 count);
	if (unlikely(retval)) {
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
		return retval;
	}

	/* account for transferred buffers */
	atomic_sub(count, &req_q->count);
	req_q->first += count;
	req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
	adapter->req_q_pci_batch = pci_batch;
	return 0;
}

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @adapter: pointer to struct zfcp_adapter
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
{
	struct qdio_initialize *init_data;

	if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) ||
	    zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal))
		return -ENOMEM;

	init_data = &adapter->qdio_init_data;

	init_data->cdev = adapter->ccw_device;
	init_data->q_format = QDIO_ZFCP_QFMT;
	memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8);
	ASCEBC(init_data->adapter_name, 8);
	init_data->qib_param_field_format = 0;
	init_data->qib_param_field = NULL;
	init_data->input_slib_elements = NULL;
	init_data->output_slib_elements = NULL;
	init_data->no_input_qs = 1;
	init_data->no_output_qs = 1;
	init_data->input_handler = zfcp_qdio_int_resp;
	init_data->output_handler = zfcp_qdio_int_req;
	init_data->int_parm = (unsigned long) adapter;
	init_data->flags = QDIO_INBOUND_0COPY_SBALS |
			QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
	init_data->input_sbal_addr_array =
			(void **) (adapter->resp_q.sbal);
	init_data->output_sbal_addr_array =
			(void **) (adapter->req_q.sbal);

	return qdio_allocate(init_data);
}

/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @adapter: pointer to struct zfcp_adapter
 */
void zfcp_qdio_close(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio_queue *req_q;
	int first, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	req_q = &adapter->req_q;
	spin_lock_bh(&adapter->req_q_lock);
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_bh(&adapter->req_q_lock);

	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&req_q->count);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
	}
	req_q->first = 0;
	atomic_set(&req_q->count, 0);
	adapter->req_q_pci_batch = 0;
	adapter->resp_q.first = 0;
	atomic_set(&adapter->resp_q.count, 0);
}

/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @adapter: pointer to struct zfcp_adapter
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_adapter *adapter)
{
	struct qdio_buffer_element *sbale;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	if (qdio_establish(&adapter->qdio_init_data))
		goto failed_establish;

	if (qdio_activate(adapter->ccw_device))
		goto failed_qdio;
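
	/*
	 * Prime the response queue: present every inbound SBAL with a
	 * single empty SBALE marked as last entry, then hand all of them
	 * to the adapter via do_QDIO.
	 */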
	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(adapter->resp_q.sbal[cc]->element[0]);
		sbale->length = 0;
		sbale->flags = SBAL_FLAGS_LAST_ENTRY;
		sbale->addr = NULL;
	}

	if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
		    QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	adapter->req_q.first = 0;
	atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
	adapter->req_q_pci_batch = 0;

	return 0;

failed_qdio:
	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&adapter->ccw_device->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}