drivers/net/ethernet/intel/i40evf/i40e_adminq.c

/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

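/* Orientation (editor's note, derived from the code below): the VF driver
 * and firmware communicate through two descriptor rings held in DMA memory.
 * The Admin Send Queue (ASQ) carries commands from the driver to firmware;
 * the Admin Receive Queue (ARQ) carries events and replies back. Head and
 * tail registers on each ring track which descriptors are owned by which
 * side.
 */
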
/**
 * i40e_is_nvm_update_op - return true if this is an NVM update operation
 * @desc: API request descriptor
 **/
static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
	return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
	       (desc->opcode == i40e_aqc_opc_nvm_update);
}

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len = I40E_VF_ATQLEN1;
		hw->aq.asq.bal = I40E_VF_ATQBAL1;
		hw->aq.asq.bah = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len = I40E_VF_ARQLEN1;
		hw->aq.arq.bal = I40E_VF_ARQBAL1;
		hw->aq.arq.bah = I40E_VF_ARQBAH1;
	}
}

/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the Admin queue design; there is
		 * no register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event) queue
 **/
static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);
	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);
	return ret_code;
}

/**
 * i40evf_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 * - hw->aq.asq_buf_size
 **/
i40e_status i40evf_init_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize locks */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}

/**
 * i40evf_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (i40evf_check_asq_alive(hw))
		i40evf_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
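	/* walk the ring from next_to_clean up to the head reported by
	 * firmware; every descriptor before head has been consumed by FW,
	 * so its callback (if any) can fire and the entry can be recycled
	 */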
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct i40e_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 * i40evf_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
bool i40evf_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * i40evf_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc
 **/
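/* A minimal caller sketch (editor's illustration, not part of this file;
 * it mirrors what i40evf_aq_queue_shutdown() does for a direct command):
 *
 *	struct i40e_aq_desc desc;
 *	i40e_status status;
 *
 *	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * Direct commands pass NULL/0 for the buffer; indirect commands supply a
 * buffer that is copied into the ring's pre-allocated DMA memory below.
 */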
i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
				    struct i40e_aq_desc *desc,
				    void *buff, /* can be NULL */
				    u16 buff_size,
				    struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;
	u32 val = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie. The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
742 "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
			buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
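	/* poll roughly every 1-2 ms until the firmware head catches up with
	 * our tail, or until the iteration budget (asq_cmd_timeout) runs out
	 */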
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40evf_asq_done(hw))
				break;
			usleep_range(1000, 2000);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40evf_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
			buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if a timeout occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}

/**
 * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
					 u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}

/**
 * i40evf_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
				     struct i40e_arq_event_info *e,
				     u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
			hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
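	/* ntu is the firmware head and ntc the next entry to clean, so this
	 * is the count of posted events not yet cleaned, computed modulo the
	 * ring size to handle wrap-around
	 */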
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}

void i40evf_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}