/* drivers/net/ethernet/intel/i40e/i40e_adminq.c */
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (hw->mac.type == I40E_MAC_VF) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
	}
}

/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_virt_mem mem;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	hw->aq.asq.desc = hw->aq.asq_mem.va;
	hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;

	ret_code = i40e_allocate_virt_mem(hw, &mem,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq_mem);
		hw->aq.asq_mem.va = NULL;
		hw->aq.asq_mem.pa = 0;
		return ret_code;
	}

	hw->aq.asq.details = mem.va;

	return ret_code;
}

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	hw->aq.arq.desc = hw->aq.arq_mem.va;
	hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;

	return ret_code;
}

/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;

	i40e_free_dma_mem(hw, &hw->aq.asq_mem);
	hw->aq.asq_mem.va = NULL;
	hw->aq.asq_mem.pa = 0;
	mem.va = hw->aq.asq.details;
	i40e_free_virt_mem(hw, &mem);
	hw->aq.asq.details = NULL;
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq_mem);
	hw->aq.arq_mem.va = NULL;
	hw->aq.arq_mem.pa = 0;
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_virt_mem mem;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
					  sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	mem.va = hw->aq.arq.r.arq_bi;
	i40e_free_virt_mem(hw, &mem);

	return ret_code;
}

/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_virt_mem mem;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
					  sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	mem.va = hw->aq.asq.r.asq_bi;
	i40e_free_virt_mem(hw, &mem);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;
	int i;

	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	mem.va = hw->aq.arq.r.arq_bi;
	i40e_free_virt_mem(hw, &mem);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* now free the buffer info list */
	mem.va = hw->aq.asq.r.asq_bi;
	i40e_free_virt_mem(hw, &mem);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static void i40e_config_asq_regs(struct i40e_hw *hw)
{
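	/* The ring base address is split across the BAH/BAL (high/low)
	 * registers; the LEN register carries the entry count, and its
	 * ENABLE bit is what arms the queue in hardware.
	 */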
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the transmit queue */
		wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
					   I40E_VF_ATQLEN1_ATQENABLE_MASK));
	} else {
		/* configure the transmit queue */
		wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	}
}

/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event queue)
 **/
static void i40e_config_arq_regs(struct i40e_hw *hw)
{
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the receive queue */
		wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
					   I40E_VF_ARQLEN1_ARQENABLE_MASK));
	} else {
		/* configure the receive queue */
		wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	}

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
}

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;
	hw->aq.asq.count = hw->aq.num_asq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	i40e_config_asq_regs(hw);

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;
	hw->aq.arq.count = hw->aq.num_arq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	i40e_config_arq_regs(hw);

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	if (hw->mac.type == I40E_MAC_VF)
		wr32(hw, I40E_VF_ATQLEN1, 0);
	else
		wr32(hw, I40E_PF_ATQLEN, 0);

	/* make sure lock is available */
	mutex_lock(&hw->aq.asq_mutex);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);
	/* free the ring descriptors */
	i40e_free_adminq_asq(hw);

	mutex_unlock(&hw->aq.asq_mutex);

	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	if (hw->mac.type == I40E_MAC_VF)
		wr32(hw, I40E_VF_ARQLEN1, 0);
	else
		wr32(hw, I40E_PF_ARQLEN, 0);

	/* make sure lock is available */
	mutex_lock(&hw->aq.arq_mutex);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);
	/* free the ring descriptors */
	i40e_free_adminq_arq(hw);

	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}

/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
	u16 eetrack_lo, eetrack_hi;
	i40e_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize locks */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	ret_code = i40e_aq_get_firmware_version(hw,
				     &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
				     &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
				     NULL);
	if (ret_code)
		goto init_adminq_free_arq;

	if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}
	i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;

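	/* Request the default HMC resource profile.  A failure here is
	 * deliberately ignored (ret_code is reset to 0 just below), so the
	 * profile request is best-effort and never fails AdminQ init.
	 */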
	ret_code = i40e_aq_set_hmc_resource_profile(hw,
						    I40E_HMC_PROFILE_DEFAULT,
						    0,
						    NULL);
	ret_code = 0;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:
	/* undo the mutex_init calls from above */
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&hw->aq.arq_mutex);

init_adminq_exit:
	return ret_code;
}
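
/* Illustrative usage (a sketch, not part of the driver): before calling
 * i40e_init_adminq() a caller must size both rings; the values here are
 * examples only:
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.num_arq_entries = 128;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *	ret_code = i40e_init_adminq(hw);
 *
 * A successful init is paired with i40e_shutdown_adminq() on teardown.
 */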

/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	/* destroy the locks */
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&hw->aq.arq_mutex);

	return ret_code;
}

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
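	/* The head register is advanced by firmware as it consumes
	 * descriptors, so everything between next_to_clean and head has
	 * completed and can be reclaimed (running its callback, if any).
	 */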
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct i40e_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 * i40e_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue.  Returns false if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
}

/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details (opaque info used in async cleanup)
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue.  It runs the queue, cleans the queue, etc
 **/
i40e_status i40e_asq_send_command(struct i40e_hw *hw,
				  struct i40e_aq_desc *desc,
				  void *buff, /* can be NULL */
				  u16 buff_size,
				  struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	mutex_lock(&hw->aq.asq_mutex);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
738 "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;
		u32 delay_len = 10;

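		/* udelay() counts microseconds, so this busy-waits in
		 * 10 us steps until the descriptor write-back shows up or
		 * total_delay crosses I40E_ASQ_CMD_TIMEOUT (compared in the
		 * same units here).
		 */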
		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			udelay(delay_len);
			total_delay += delay_len;
		} while (total_delay < I40E_ASQ_CMD_TIMEOUT);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);
			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
asq_send_command_exit:
	return status;
}

/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
}
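
/* Illustrative call sequence (a sketch, not part of the driver): a direct
 * command is typically issued by filling a descriptor with this helper and
 * handing it to the send routine; 'aq_opcode' is a hypothetical opcode:
 *
 *	struct i40e_aq_desc desc;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, aq_opcode);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * Indirect commands pass a buffer and its size instead of NULL/0.
 */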

/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e.  It can also return how many events are
 * left to process through 'pending'
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
				   struct i40e_arq_event_info *e,
				   u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Queue is empty.\n");
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;
	i40e_debug_aq(hw,
		      I40E_DEBUG_AQ_COMMAND,
		      (void *)desc,
		      hw->aq.arq.r.arq_bi[desc_idx].va);

	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	} else {
		memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
		datalen = le16_to_cpu(desc->datalen);
		e->msg_size = min(datalen, e->msg_size);
		if (e->msg_buf != NULL && (e->msg_size != 0))
			memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
			       e->msg_size);
	}

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
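	/* The ring is circular: when ntc has wrapped past ntu, adding the
	 * ring size to (ntu - ntc) yields the true count of events still
	 * waiting between next_to_clean and the hardware head.
	 */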
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}

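/**
 * i40e_resume_aq - resume AQ processing from idle
 * @hw: pointer to the hardware structure
 *
 * Re-enables the send and receive queues after a PF reset has
 * cleared the AdminQ registers.
 **/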
void i40e_resume_aq(struct i40e_hw *hw)
{
	u32 reg = 0;

	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);
	reg = hw->aq.num_asq_entries;

	if (hw->mac.type == I40E_MAC_VF) {
		reg |= I40E_VF_ATQLEN1_ATQENABLE_MASK;
		wr32(hw, I40E_VF_ATQLEN1, reg);
	} else {
		reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
		wr32(hw, I40E_PF_ATQLEN, reg);
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
	reg = hw->aq.num_arq_entries;

	if (hw->mac.type == I40E_MAC_VF) {
		reg |= I40E_VF_ARQLEN1_ARQENABLE_MASK;
		wr32(hw, I40E_VF_ARQLEN1, reg);
	} else {
		reg |= I40E_PF_ARQLEN_ARQENABLE_MASK;
		wr32(hw, I40E_PF_ARQLEN, reg);
	}
}