1 /******************************************************************************
2
3 Copyright (c) 2013-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixl/i40e_adminq.c 284049 2015-06-05 22:52:42Z jfv $*/
34
35 #include "i40e_status.h"
36 #include "i40e_type.h"
37 #include "i40e_register.h"
38 #include "i40e_adminq.h"
39 #include "i40e_prototype.h"
40
41 /**
42 * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
43 * @desc: API request descriptor
44 **/
45 static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
46 {
47 return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
48 desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
49 }
50
51 /**
52 * i40e_adminq_init_regs - Initialize AdminQ registers
53 * @hw: pointer to the hardware structure
54 *
55 * This assumes the alloc_asq and alloc_arq functions have already been called
56 **/
57 static void i40e_adminq_init_regs(struct i40e_hw *hw)
58 {
59 /* set head and tail registers in our local struct */
60 if (i40e_is_vf(hw)) {
61 hw->aq.asq.tail = I40E_VF_ATQT1;
62 hw->aq.asq.head = I40E_VF_ATQH1;
63 hw->aq.asq.len = I40E_VF_ATQLEN1;
64 hw->aq.asq.bal = I40E_VF_ATQBAL1;
65 hw->aq.asq.bah = I40E_VF_ATQBAH1;
66 hw->aq.arq.tail = I40E_VF_ARQT1;
67 hw->aq.arq.head = I40E_VF_ARQH1;
68 hw->aq.arq.len = I40E_VF_ARQLEN1;
69 hw->aq.arq.bal = I40E_VF_ARQBAL1;
70 hw->aq.arq.bah = I40E_VF_ARQBAH1;
71 } else {
72 hw->aq.asq.tail = I40E_PF_ATQT;
73 hw->aq.asq.head = I40E_PF_ATQH;
74 hw->aq.asq.len = I40E_PF_ATQLEN;
75 hw->aq.asq.bal = I40E_PF_ATQBAL;
76 hw->aq.asq.bah = I40E_PF_ATQBAH;
77 hw->aq.arq.tail = I40E_PF_ARQT;
78 hw->aq.arq.head = I40E_PF_ARQH;
79 hw->aq.arq.len = I40E_PF_ARQLEN;
80 hw->aq.arq.bal = I40E_PF_ARQBAL;
81 hw->aq.arq.bah = I40E_PF_ARQBAH;
82 }
83 }
84
85 /**
86 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
87 * @hw: pointer to the hardware structure
88 **/
89 enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
90 {
91 enum i40e_status_code ret_code;
92
93 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
94 i40e_mem_atq_ring,
95 (hw->aq.num_asq_entries *
96 sizeof(struct i40e_aq_desc)),
97 I40E_ADMINQ_DESC_ALIGNMENT);
98 if (ret_code)
99 return ret_code;
100
101 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
102 (hw->aq.num_asq_entries *
103 sizeof(struct i40e_asq_cmd_details)));
104 if (ret_code) {
105 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
106 return ret_code;
107 }
108
109 return ret_code;
110 }
111
112 /**
113 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
114 * @hw: pointer to the hardware structure
115 **/
116 enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
117 {
118 enum i40e_status_code ret_code;
119
120 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
121 i40e_mem_arq_ring,
122 (hw->aq.num_arq_entries *
123 sizeof(struct i40e_aq_desc)),
124 I40E_ADMINQ_DESC_ALIGNMENT);
125
126 return ret_code;
127 }
128
/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated.  Only the descriptor ring is released here;
 * cmd_buf and the per-entry buffers are freed by i40e_free_asq_bufs().
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
140
/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated.  Only the descriptor ring is released here; the
 * pre-posted receive buffers are freed by i40e_free_arq_bufs().
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
152
/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 *
 * Allocates the arq_bi tracking array, one DMA buffer per descriptor,
 * and pre-fills every descriptor with its buffer's address so firmware
 * can post events immediately.  On failure, every buffer allocated so
 * far (and the tracking array) is freed before returning the error.
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		/* buffers larger than I40E_AQ_LARGE_BUF also need the LB flag */
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		/* point the descriptor at the DMA buffer just allocated */
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
219
/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 *
 * Allocates the asq_bi tracking array plus one DMA buffer per send
 * descriptor.  Unlike the ARQ path, descriptors are not pre-filled:
 * they are programmed per-command by i40e_asq_send_command().  On
 * failure all buffers allocated so far are freed before returning.
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}
259
260 /**
261 * i40e_free_arq_bufs - Free receive queue buffer info elements
262 * @hw: pointer to the hardware structure
263 **/
264 static void i40e_free_arq_bufs(struct i40e_hw *hw)
265 {
266 int i;
267
268 /* free descriptors */
269 for (i = 0; i < hw->aq.num_arq_entries; i++)
270 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
271
272 /* free the descriptor memory */
273 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
274
275 /* free the dma header */
276 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
277 }
278
279 /**
280 * i40e_free_asq_bufs - Free send queue buffer info elements
281 * @hw: pointer to the hardware structure
282 **/
283 static void i40e_free_asq_bufs(struct i40e_hw *hw)
284 {
285 int i;
286
287 /* only unmap if the address is non-NULL */
288 for (i = 0; i < hw->aq.num_asq_entries; i++)
289 if (hw->aq.asq.r.asq_bi[i].pa)
290 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
291
292 /* free the buffer info list */
293 i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
294
295 /* free the descriptor memory */
296 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
297
298 /* free the dma header */
299 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
300 }
301
302 /**
303 * i40e_config_asq_regs - configure ASQ registers
304 * @hw: pointer to the hardware structure
305 *
306 * Configure base address and length registers for the transmit queue
307 **/
308 static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
309 {
310 enum i40e_status_code ret_code = I40E_SUCCESS;
311 u32 reg = 0;
312
313 /* Clear Head and Tail */
314 wr32(hw, hw->aq.asq.head, 0);
315 wr32(hw, hw->aq.asq.tail, 0);
316
317 /* set starting point */
318 if (!i40e_is_vf(hw))
319 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
320 I40E_PF_ATQLEN_ATQENABLE_MASK));
321 if (i40e_is_vf(hw))
322 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
323 I40E_VF_ATQLEN1_ATQENABLE_MASK));
324 wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
325 wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
326
327 /* Check one register to verify that config was applied */
328 reg = rd32(hw, hw->aq.asq.bal);
329 if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
330 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
331
332 return ret_code;
333 }
334
335 /**
336 * i40e_config_arq_regs - ARQ register configuration
337 * @hw: pointer to the hardware structure
338 *
339 * Configure base address and length registers for the receive (event queue)
340 **/
341 static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
342 {
343 enum i40e_status_code ret_code = I40E_SUCCESS;
344 u32 reg = 0;
345
346 /* Clear Head and Tail */
347 wr32(hw, hw->aq.arq.head, 0);
348 wr32(hw, hw->aq.arq.tail, 0);
349
350 /* set starting point */
351 if (!i40e_is_vf(hw))
352 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
353 I40E_PF_ARQLEN_ARQENABLE_MASK));
354 if (i40e_is_vf(hw))
355 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
356 I40E_VF_ARQLEN1_ARQENABLE_MASK));
357 wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
358 wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
359
360 /* Update tail in the HW to post pre-allocated buffers */
361 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
362
363 /* Check one register to verify that config was applied */
364 reg = rd32(hw, hw->aq.arq.bal);
365 if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
366 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
367
368 return ret_code;
369 }
370
/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 *
 * Returns I40E_ERR_NOT_READY if the queue is already initialized
 * (asq.count != 0), I40E_ERR_CONFIG for missing configuration, or the
 * status of the ring/buffer allocation and register configuration.
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;
	/* non-zero count marks the queue as initialized */
	hw->aq.asq.count = hw->aq.num_asq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}
429
/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 *
 * Returns I40E_ERR_NOT_READY if the queue is already initialized
 * (arq.count != 0), I40E_ERR_CONFIG for missing configuration, or the
 * status of the ring/buffer allocation and register configuration.
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;
	/* non-zero count marks the queue as initialized */
	hw->aq.arq.count = hw->aq.num_arq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}
488
489 /**
490 * i40e_shutdown_asq - shutdown the ASQ
491 * @hw: pointer to the hardware structure
492 *
493 * The main shutdown routine for the Admin Send Queue
494 **/
495 enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
496 {
497 enum i40e_status_code ret_code = I40E_SUCCESS;
498
499 if (hw->aq.asq.count == 0)
500 return I40E_ERR_NOT_READY;
501
502 /* Stop firmware AdminQ processing */
503 wr32(hw, hw->aq.asq.head, 0);
504 wr32(hw, hw->aq.asq.tail, 0);
505 wr32(hw, hw->aq.asq.len, 0);
506 wr32(hw, hw->aq.asq.bal, 0);
507 wr32(hw, hw->aq.asq.bah, 0);
508
509 /* make sure spinlock is available */
510 i40e_acquire_spinlock(&hw->aq.asq_spinlock);
511
512 hw->aq.asq.count = 0; /* to indicate uninitialized queue */
513
514 /* free ring buffers */
515 i40e_free_asq_bufs(hw);
516
517 i40e_release_spinlock(&hw->aq.asq_spinlock);
518
519 return ret_code;
520 }
521
522 /**
523 * i40e_shutdown_arq - shutdown ARQ
524 * @hw: pointer to the hardware structure
525 *
526 * The main shutdown routine for the Admin Receive Queue
527 **/
528 enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
529 {
530 enum i40e_status_code ret_code = I40E_SUCCESS;
531
532 if (hw->aq.arq.count == 0)
533 return I40E_ERR_NOT_READY;
534
535 /* Stop firmware AdminQ processing */
536 wr32(hw, hw->aq.arq.head, 0);
537 wr32(hw, hw->aq.arq.tail, 0);
538 wr32(hw, hw->aq.arq.len, 0);
539 wr32(hw, hw->aq.arq.bal, 0);
540 wr32(hw, hw->aq.arq.bah, 0);
541
542 /* make sure spinlock is available */
543 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
544
545 hw->aq.arq.count = 0; /* to indicate uninitialized queue */
546
547 /* free ring buffers */
548 i40e_free_arq_bufs(hw);
549
550 i40e_release_spinlock(&hw->aq.arq_spinlock);
551
552 return ret_code;
553 }
554
/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 * - hw->aq.asq_buf_size
 *
 * Sets up both queues, then (PF only) queries the firmware version with
 * retries, reads NVM/OEM version info, validates the firmware API major
 * version, and releases any stale NVM resource lock.  On failure the
 * queues and spinlocks initialized so far are torn down in reverse order.
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	u16 eetrack_lo, eetrack_hi;
	u16 cfg_ptr, oem_hi, oem_lo;
	int retry = 0;
	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize spin locks */
#ifdef I40E_ILLUMOS
	/* We need "hw" in illumos for driver-prioritization parameters. */
	i40e_init_spinlock(&hw->aq.asq_spinlock, hw);
	i40e_init_spinlock(&hw->aq.arq_spinlock, hw);
#else
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);
#endif /* I40E_ILLUMOS */

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		/* only a timeout is worth retrying; any other result ends it */
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	/* a newer API major version than ours is incompatible */
	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->aq.nvm_release_on_done = FALSE;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = i40e_aq_set_hmc_resource_profile(hw,
						    I40E_HMC_PROFILE_DEFAULT,
						    0,
						    NULL);
	/* the HMC profile result is deliberately discarded */
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
674
675 /**
676 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
677 * @hw: pointer to the hardware structure
678 **/
679 enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
680 {
681 enum i40e_status_code ret_code = I40E_SUCCESS;
682
683 if (i40e_check_asq_alive(hw))
684 i40e_aq_queue_shutdown(hw, TRUE);
685
686 i40e_shutdown_asq(hw);
687 i40e_shutdown_arq(hw);
688
689 /* destroy the spinlocks */
690 i40e_destroy_spinlock(&hw->aq.asq_spinlock);
691 i40e_destroy_spinlock(&hw->aq.arq_spinlock);
692
693 if (hw->nvm_buff.va)
694 i40e_free_virt_mem(hw, &hw->nvm_buff);
695
696 return ret_code;
697 }
698
/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * Walks the send ring from next_to_clean up to the hardware head,
 * invoking each entry's completion callback (if any) on a stack copy of
 * the descriptor, then zeroing the descriptor and its details slot.
 * Called from i40e_asq_send_command() with asq_spinlock held.
 *
 * returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);

	/* everything before the hardware head has been consumed by FW */
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			/* copy first so the callback sees a stable descriptor */
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		/* advance with wrap-around */
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
740
741 /**
742 * i40e_asq_done - check if FW has processed the Admin Send Queue
743 * @hw: pointer to the hw struct
744 *
745 * Returns TRUE if the firmware has processed all descriptors on the
746 * admin send queue. Returns FALSE if there are still requests pending.
747 **/
748 bool i40e_asq_done(struct i40e_hw *hw)
749 {
750 /* AQ designers suggest use of head for better
751 * timing reliability than DD bit
752 */
753 return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
754
755 }
756
/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc
 *
 * Unless the caller asked for async/postpone handling, the routine spins
 * (1 ms at a time, up to hw->aq.asq_cmd_timeout) waiting for firmware to
 * advance the queue head, then copies the written-back descriptor (and
 * buffer, for indirect commands) back to the caller and translates the
 * firmware return value into a status code.
 *
 * NOTE(review): the details slot for next_to_use is read and written
 * before asq_spinlock is acquired below -- this looks racy if two
 * threads can send concurrently; confirm callers serialize, or consider
 * moving the lock acquisition ahead of the details access.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16 buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = FALSE;
	u16 retval = 0;
	u32 val = 0;

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	/* a head value beyond the ring size means the queue is broken */
	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie. The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			i40e_msec_delay(1);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = TRUE;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
asq_send_command_exit:
	return status;
}
960
961 /**
962 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
963 * @desc: pointer to the temp descriptor (non DMA mem)
964 * @opcode: the opcode can be used to decide which flags to turn off or on
965 *
966 * Fill the desc with default values
967 **/
968 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
969 u16 opcode)
970 {
971 /* zero out the desc */
972 i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
973 I40E_NONDMA_MEM);
974 desc->opcode = CPU_TO_LE16(opcode);
975 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
976 }
977
/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 *
 * After copying the event out, the descriptor is re-armed with its
 * original buffer address/size and handed back to firmware via the tail
 * register.  NVM-update post-processing at the end runs after the lock
 * is released, using only the copied-out descriptor in 'e'.
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	/* set next_to_use to head; exactly one of these branches runs,
	 * so ntu is always assigned before use
	 */
	if (!i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		/* record the firmware error but still deliver the event */
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	/* truncate the message to the caller's buffer if necessary */
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	/* if this was an NVM update completion, release the NVM resource
	 * and advance the NVM-update state machine
	 */
	if (i40e_is_nvm_update_op(&e->desc)) {
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = FALSE;
		}

		switch (hw->nvmupd_state) {
		case I40E_NVMUPD_STATE_INIT_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
			break;

		case I40E_NVMUPD_STATE_WRITE_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
			break;

		default:
			break;
		}
	}

	return ret_code;
}
1094
/**
 * i40e_resume_aq - re-program AQ registers after a reset
 * @hw: pointer to the hardware structure
 *
 * Resets both rings' software indices and re-writes the base/length
 * registers from the already-allocated rings.
 * NOTE(review): the i40e_config_*_regs() return codes are ignored here,
 * so a register read-back mismatch goes unreported -- confirm intended.
 **/
void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}