FreeBSD/Linux Kernel Cross Reference
sys/dev/ixl/i40e_nvm.c
1 /******************************************************************************
2
3 Copyright (c) 2013-2018, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #include "i40e_prototype.h"
36
37 /**
38 * i40e_init_nvm - Initialize NVM function pointers
39 * @hw: pointer to the HW structure
40 *
41 * Setup the function pointers and the NVM info structure. Should be called
42 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
43 * Please notice that the NVM term is used here (& in all methods covered
44 * in this file) as an equivalent of the FLASH part mapped into the SR.
45 * We are accessing FLASH always through the Shadow RAM.
46 **/
47 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
48 {
49 struct i40e_nvm_info *nvm = &hw->nvm;
50 enum i40e_status_code ret_code = I40E_SUCCESS;
51 u32 fla, gens;
52 u8 sr_size;
53
54 DEBUGFUNC("i40e_init_nvm");
55
56 /* The SR size is stored regardless of the nvm programming mode
57 * as the blank mode may be used in the factory line.
58 */
59 gens = rd32(hw, I40E_GLNVM_GENS);
60 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
61 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
62 /* Switching to words (sr_size contains power of 2KB) */
63 nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
64
65 /* Check if we are in the normal or blank NVM programming mode */
66 fla = rd32(hw, I40E_GLNVM_FLA);
67 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
68 /* Max NVM timeout */
69 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
70 nvm->blank_nvm_mode = FALSE;
71 } else { /* Blank programming mode */
72 nvm->blank_nvm_mode = TRUE;
73 ret_code = I40E_ERR_NVM_BLANK_MODE;
74 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
75 }
76
77 return ret_code;
78 }
79
/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * Requests NVM ownership of the given type via the AdminQ "request
 * resource" command.  If another function currently owns the resource,
 * polls (every 10 ms, bounded by I40E_MAX_NVM_TIMEOUT measured against
 * the global device timer) until the current owner's window expires.
 * On success, hw->nvm.hw_semaphore_timeout is set to the GTIME tick at
 * which our ownership expires; on final failure it is cleared to 0.
 **/
enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
				       enum i40e_aq_resource_access_type access)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 gtime, timeout;
	u64 time_left = 0;

	DEBUGFUNC("i40e_acquire_nvm");

	/* No HW semaphore exists in blank programming mode */
	if (hw->nvm.blank_nvm_mode)
		goto i40e_i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout as an absolute GTIME tick */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
			   access, (unsigned long long)time_left, ret_code,
			   hw->aq.asq_last_status);

	if (ret_code && time_left) {
		/* Poll until the current NVM owner times out */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			i40e_msec_delay(10);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							I40E_NVM_RESOURCE_ID,
							access, 0, &time_left,
							NULL);
			if (ret_code == I40E_SUCCESS) {
				hw->nvm.hw_semaphore_timeout =
					    I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code != I40E_SUCCESS) {
			/* Could not take ownership within the budget */
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   (unsigned long long)time_left, ret_code,
				   hw->aq.asq_last_status);
		}
	}

i40e_i40e_acquire_nvm_exit:
	return ret_code;
}
142
143 /**
144 * i40e_release_nvm - Generic request for releasing the NVM ownership
145 * @hw: pointer to the HW structure
146 *
147 * This function will release NVM resource via the proper Admin Command.
148 **/
149 void i40e_release_nvm(struct i40e_hw *hw)
150 {
151 enum i40e_status_code ret_code = I40E_SUCCESS;
152 u32 total_delay = 0;
153
154 DEBUGFUNC("i40e_release_nvm");
155
156 if (hw->nvm.blank_nvm_mode)
157 return;
158
159 ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
160
161 /* there are some rare cases when trying to release the resource
162 * results in an admin Q timeout, so handle them correctly
163 */
164 while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
165 (total_delay < hw->aq.asq_cmd_timeout)) {
166 i40e_msec_delay(1);
167 ret_code = i40e_aq_release_resource(hw,
168 I40E_NVM_RESOURCE_ID, 0, NULL);
169 total_delay++;
170 }
171 }
172
173 /**
174 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
175 * @hw: pointer to the HW structure
176 *
177 * Polls the SRCTL Shadow RAM register done bit.
178 **/
179 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
180 {
181 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
182 u32 srctl, wait_cnt;
183
184 DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
185
186 /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
187 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
188 srctl = rd32(hw, I40E_GLNVM_SRCTL);
189 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
190 ret_code = I40E_SUCCESS;
191 break;
192 }
193 i40e_usec_delay(5);
194 }
195 if (ret_code == I40E_ERR_TIMEOUT)
196 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
197 return ret_code;
198 }
199
/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 * Returns I40E_ERR_PARAM for out-of-range offsets and I40E_ERR_TIMEOUT
 * if the hardware never signals completion.
 **/
enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					       u16 *data)
{
	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	DEBUGFUNC("i40e_read_nvm_word_srctl");

	/* Reject offsets beyond the Shadow RAM size captured at init */
	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first so no prior access is still in flight */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (ret_code == I40E_SUCCESS) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (ret_code == I40E_SUCCESS) {
			/* Extract the 16-bit payload from GLNVM_SRDATA */
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code != I40E_SUCCESS)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}
249
250 /**
251 * i40e_read_nvm_aq - Read Shadow RAM.
252 * @hw: pointer to the HW structure.
253 * @module_pointer: module pointer location in words from the NVM beginning
254 * @offset: offset in words from module start
255 * @words: number of words to write
256 * @data: buffer with words to write to the Shadow RAM
257 * @last_command: tells the AdminQ that this is the last command
258 *
259 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
260 **/
261 static enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw,
262 u8 module_pointer, u32 offset,
263 u16 words, void *data,
264 bool last_command)
265 {
266 enum i40e_status_code ret_code = I40E_ERR_NVM;
267 struct i40e_asq_cmd_details cmd_details;
268
269 DEBUGFUNC("i40e_read_nvm_aq");
270
271 memset(&cmd_details, 0, sizeof(cmd_details));
272 cmd_details.wb_desc = &hw->nvm_wb_desc;
273
274 /* Here we are checking the SR limit only for the flat memory model.
275 * We cannot do it for the module-based model, as we did not acquire
276 * the NVM resource yet (we cannot get the module pointer value).
277 * Firmware will check the module-based model.
278 */
279 if ((offset + words) > hw->nvm.sr_size)
280 i40e_debug(hw, I40E_DEBUG_NVM,
281 "NVM write error: offset %d beyond Shadow RAM limit %d\n",
282 (offset + words), hw->nvm.sr_size);
283 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
284 /* We can write only up to 4KB (one sector), in one AQ write */
285 i40e_debug(hw, I40E_DEBUG_NVM,
286 "NVM write fail error: tried to write %d words, limit is %d.\n",
287 words, I40E_SR_SECTOR_SIZE_IN_WORDS);
288 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
289 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
290 /* A single write cannot spread over two sectors */
291 i40e_debug(hw, I40E_DEBUG_NVM,
292 "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
293 offset, words);
294 else
295 ret_code = i40e_aq_read_nvm(hw, module_pointer,
296 2 * offset, /*bytes*/
297 2 * words, /*bytes*/
298 data, last_command, &cmd_details);
299
300 return ret_code;
301 }
302
303 /**
304 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
305 * @hw: pointer to the HW structure
306 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
307 * @data: word read from the Shadow RAM
308 *
309 * Reads one 16 bit word from the Shadow RAM using the AdminQ
310 **/
311 static enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
312 u16 *data)
313 {
314 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
315
316 DEBUGFUNC("i40e_read_nvm_word_aq");
317
318 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE);
319 *data = LE16_TO_CPU(*(__le16 *)data);
320
321 return ret_code;
322 }
323
324 /**
325 * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
326 * @hw: pointer to the HW structure
327 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
328 * @data: word read from the Shadow RAM
329 *
330 * Reads one 16 bit word from the Shadow RAM.
331 *
332 * Do not use this function except in cases where the nvm lock is already
333 * taken via i40e_acquire_nvm().
334 **/
335 enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
336 u16 offset,
337 u16 *data)
338 {
339
340 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
341 return i40e_read_nvm_word_aq(hw, offset, data);
342
343 return i40e_read_nvm_word_srctl(hw, offset, data);
344 }
345
346 /**
347 * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
348 * @hw: pointer to the HW structure
349 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
350 * @data: word read from the Shadow RAM
351 *
352 * Reads one 16 bit word from the Shadow RAM.
353 **/
354 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
355 u16 *data)
356 {
357 enum i40e_status_code ret_code = I40E_SUCCESS;
358
359 if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
360 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
361
362 if (ret_code)
363 return ret_code;
364 ret_code = __i40e_read_nvm_word(hw, offset, data);
365
366 if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
367 i40e_release_nvm(hw);
368 return ret_code;
369 }
370
/**
 * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
 * @hw: Pointer to the HW structure
 * @module_ptr: Pointer to module in words with respect to NVM beginning
 * @module_offset: Offset in words from module start
 * @data_offset: Offset in words from reading data area start
 * @words_data_size: Words to read from NVM
 * @data_ptr: Pointer to memory location where resulting buffer will be stored
 *
 * Resolves a two-level pointer chain in the Shadow RAM (module pointer,
 * then a module-relative pointer) and reads @words_data_size words from
 * the resulting location into @data_ptr.
 **/
enum i40e_status_code
i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
			  u16 data_offset, u16 words_data_size, u16 *data_ptr)
{
	enum i40e_status_code status;
	u16 specific_ptr = 0;
	u16 ptr_value = 0;
	u16 offset = 0;

	/* Dereference the first-level module pointer, if any.
	 * When module_ptr is 0, ptr_value stays 0 (flat addressing).
	 */
	if (module_ptr != 0) {
		status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
		if (status != I40E_SUCCESS) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Reading nvm word failed.Error code: %d.\n",
				   status);
			return I40E_ERR_NVM;
		}
	}
#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
#define I40E_NVM_INVALID_VAL 0xFFFF

	/* Pointer not initialized */
	if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
	    ptr_value == I40E_NVM_INVALID_VAL) {
		i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
		return I40E_ERR_BAD_PTR;
	}

	/* Check whether the module is in SR mapped area or outside */
	if (ptr_value & I40E_PTR_TYPE) {
		/* Pointer points outside of the Shared RAM mapped area */
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");

		return I40E_ERR_PARAM;
	} else {
		/* Read from the Shadow RAM */

		/* Second-level pointer located at module start + offset */
		status = i40e_read_nvm_word(hw, ptr_value + module_offset,
					    &specific_ptr);
		if (status != I40E_SUCCESS) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Reading nvm word failed.Error code: %d.\n",
				   status);
			return I40E_ERR_NVM;
		}

		/* Final word offset of the requested data area */
		offset = ptr_value + module_offset + specific_ptr +
			data_offset;

		status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
					      data_ptr);
		if (status != I40E_SUCCESS) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Reading nvm buffer failed.Error code: %d.\n",
				   status);
		}
	}

	return status;
}
441
442 /**
443 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
444 * @hw: pointer to the HW structure
445 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
446 * @words: (in) number of words to read; (out) number of words actually read
447 * @data: words read from the Shadow RAM
448 *
449 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
450 * method. The buffer read is preceded by the NVM ownership take
451 * and followed by the release.
452 **/
453 static enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
454 u16 *words, u16 *data)
455 {
456 enum i40e_status_code ret_code = I40E_SUCCESS;
457 u16 index, word;
458
459 DEBUGFUNC("i40e_read_nvm_buffer_srctl");
460
461 /* Loop through the selected region */
462 for (word = 0; word < *words; word++) {
463 index = offset + word;
464 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
465 if (ret_code != I40E_SUCCESS)
466 break;
467 }
468
469 /* Update the number of words read from the Shadow RAM */
470 *words = word;
471
472 return ret_code;
473 }
474
/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method, sector by sector: the AQ cannot read more than one 4KB sector
 * per command nor cross a sector boundary.  NVM ownership must be held
 * by the caller (see i40e_read_nvm_buffer()).
 **/
static enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
						     u16 *words, u16 *data)
{
	enum i40e_status_code ret_code;
	u16 read_size = *words;
	bool last_cmd = FALSE;
	u16 words_read = 0;
	u16 i = 0;

	DEBUGFUNC("i40e_read_nvm_buffer_aq");

	do {
		/* Calculate number of bytes we should read in this step.
		 * FVL AQ do not allow to read more than one page at a time or
		 * to cross page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			/* Unaligned start: read only up to the sector end */
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			/* Aligned: a full sector or whatever remains */
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = TRUE;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code != I40E_SUCCESS)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	/* Convert the whole buffer from little-endian to host order */
	for (i = 0; i < *words; i++)
		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}
533
534 /**
535 * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
536 * @hw: pointer to the HW structure
537 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
538 * @words: (in) number of words to read; (out) number of words actually read
539 * @data: words read from the Shadow RAM
540 *
541 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
542 * method.
543 **/
544 enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
545 u16 offset,
546 u16 *words, u16 *data)
547 {
548 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
549 return i40e_read_nvm_buffer_aq(hw, offset, words, data);
550
551 return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
552 }
553
554 /**
555 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
556 * @hw: pointer to the HW structure
557 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
558 * @words: (in) number of words to read; (out) number of words actually read
559 * @data: words read from the Shadow RAM
560 *
561 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
562 * method. The buffer read is preceded by the NVM ownership take
563 * and followed by the release.
564 **/
565 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
566 u16 *words, u16 *data)
567 {
568 enum i40e_status_code ret_code = I40E_SUCCESS;
569
570 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
571 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
572 if (!ret_code) {
573 ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
574 data);
575 i40e_release_nvm(hw);
576 }
577 } else {
578 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
579 }
580
581 return ret_code;
582 }
583
/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 * A single call may write at most one 4KB sector and must not cross a
 * sector boundary.
 **/
enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
					u32 offset, u16 words, void *data,
					bool last_command)
{
	enum i40e_status_code ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	DEBUGFUNC("i40e_write_nvm_aq");

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector), in one AQ write */
		DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
	else
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset, /*bytes*/
					      2 * words,  /*bytes*/
					      data, last_command, 0,
					      &cmd_details);

	return ret_code;
}
630
631 /**
632 * __i40e_write_nvm_word - Writes Shadow RAM word
633 * @hw: pointer to the HW structure
634 * @offset: offset of the Shadow RAM word to write
635 * @data: word to write to the Shadow RAM
636 *
637 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
638 * NVM ownership have to be acquired and released (on ARQ completion event
639 * reception) by caller. To commit SR to NVM update checksum function
640 * should be called.
641 **/
642 enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
643 void *data)
644 {
645 DEBUGFUNC("i40e_write_nvm_word");
646
647 *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
648
649 /* Value 0x00 below means that we treat SR as a flat mem */
650 return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
651 }
652
653 /**
654 * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
655 * @hw: pointer to the HW structure
656 * @module_pointer: module pointer location in words from the NVM beginning
657 * @offset: offset of the Shadow RAM buffer to write
658 * @words: number of words to write
659 * @data: words to write to the Shadow RAM
660 *
661 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
662 * NVM ownership must be acquired before calling this function and released
663 * on ARQ completion event reception by caller. To commit SR to NVM update
664 * checksum function should be called.
665 **/
666 enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
667 u8 module_pointer, u32 offset,
668 u16 words, void *data)
669 {
670 __le16 *le_word_ptr = (__le16 *)data;
671 u16 *word_ptr = (u16 *)data;
672 u32 i = 0;
673
674 DEBUGFUNC("i40e_write_nvm_buffer");
675
676 for (i = 0; i < words; i++)
677 le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
678
679 /* Here we will only write one buffer as the size of the modules
680 * mirrored in the Shadow RAM is always less than 4K.
681 */
682 return i40e_write_nvm_aq(hw, module_pointer, offset, words,
683 data, FALSE);
684 }
685
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates the SW checksum that covers the whole 64kB
 * shadow RAM except the VPD and PCIe ALT Auto-load modules.  The structure
 * and size of the VPD module is customer specific and unknown, so the
 * maximum possible VPD size (1kB) is skipped.  The caller must hold the
 * NVM lock (reads go through __i40e_read_nvm_word/__i40e_read_nvm_buffer).
 **/
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	DEBUGFUNC("i40e_calc_nvm_checksum");

	/* One sector's worth of scratch space, refilled page by page */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read a fresh SR page whenever i crosses a sector boundary */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code != I40E_SUCCESS) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		/* Accumulate using the word's position within the page */
		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* Checksum is chosen so that all covered words sum to the base */
	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
769
770 /**
771 * i40e_update_nvm_checksum - Updates the NVM checksum
772 * @hw: pointer to hardware structure
773 *
774 * NVM ownership must be acquired before calling this function and released
775 * on ARQ completion event reception by caller.
776 * This function will commit SR to NVM.
777 **/
778 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
779 {
780 enum i40e_status_code ret_code = I40E_SUCCESS;
781 u16 checksum;
782 __le16 le_sum;
783
784 DEBUGFUNC("i40e_update_nvm_checksum");
785
786 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
787 if (ret_code == I40E_SUCCESS) {
788 le_sum = CPU_TO_LE16(checksum);
789 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
790 1, &le_sum, TRUE);
791 }
792
793 return ret_code;
794 }
795
/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need the checksum value, @checksum may be NULL.
 * Returns I40E_ERR_NVM_CHECKSUM when the stored and calculated checksums
 * disagree.
 **/
enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
						 u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	DEBUGFUNC("i40e_validate_nvm_checksum");

	/* We must acquire the NVM lock in order to correctly synchronize the
	 * NVM accesses across multiple PFs. Without doing so it is possible
	 * for one of the PFs to read invalid data potentially indicating that
	 * the checksum is invalid.
	 */
	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;
	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	/* Read the stored checksum word; its status is intentionally not
	 * checked here — a failed calc above takes precedence below.
	 */
	__i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
	i40e_release_nvm(hw);
	if (ret_code)
		return ret_code;

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

	return ret_code;
}
839
840 static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
841 struct i40e_nvm_access *cmd,
842 u8 *bytes, int *perrno);
843 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
844 struct i40e_nvm_access *cmd,
845 u8 *bytes, int *perrno);
846 static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
847 struct i40e_nvm_access *cmd,
848 u8 *bytes, int *perrno);
849 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
850 struct i40e_nvm_access *cmd,
851 int *perrno);
852 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
853 struct i40e_nvm_access *cmd,
854 int *perrno);
855 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
856 struct i40e_nvm_access *cmd,
857 u8 *bytes, int *perrno);
858 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
859 struct i40e_nvm_access *cmd,
860 u8 *bytes, int *perrno);
861 static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
862 struct i40e_nvm_access *cmd,
863 u8 *bytes, int *perrno);
864 static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
865 struct i40e_nvm_access *cmd,
866 u8 *bytes, int *perrno);
867 static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
868 struct i40e_nvm_access *cmd,
869 u8 *bytes, int *perrno);
870 static INLINE u8 i40e_nvmupd_get_module(u32 val)
871 {
872 return (u8)(val & I40E_NVM_MOD_PNT_MASK);
873 }
874 static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
875 {
876 return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
877 }
878
879 static INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
880 {
881 return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
882 I40E_NVM_PRESERVATION_FLAGS_SHIFT);
883 }
884
/* Human-readable names for nvmupd commands, indexed directly by the
 * enum i40e_nvmupd_cmd value (see the i40e_nvm_update_state_str[upd_cmd]
 * lookup in i40e_nvmupd_command); entry order must stay in sync with
 * that enum.
 */
static const char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
	"I40E_NVMUPD_GET_FEATURES",
};
905
906 /**
907 * i40e_nvmupd_command - Process an NVM update command
908 * @hw: pointer to hardware structure
909 * @cmd: pointer to nvm update command
910 * @bytes: pointer to the data buffer
911 * @perrno: pointer to return error code
912 *
913 * Dispatches command depending on what update state is current
914 **/
/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current.  STATUS
 * and FEATURES requests are answered immediately; all other commands go
 * through the nvmupd state machine under the adminq receive spinlock.
 **/
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	enum i40e_status_code status;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_command");

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	/* An invalid command is only logged here; execution continues and
	 * each state handler below re-validates and rejects it itself.
	 */
	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		/* byte 0: current state machine state */
		bytes[0] = hw->nvmupd_state;

		/* NOTE(review): stores the wait opcode as a host-order u16
		 * at bytes[2]; assumes the caller's buffer tolerates this
		 * possibly-unaligned store -- confirm on strict-alignment
		 * targets.
		 */
		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return I40E_SUCCESS;
	}

	/*
	 * A supported features request returns immediately
	 * rather than going into state machine
	 */
	if (upd_cmd == I40E_NVMUPD_FEATURES) {
		if (cmd->data_size < hw->nvmupd_features.size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		/*
		 * If buffer is bigger than i40e_nvmupd_features structure,
		 * make sure the trailing bytes are set to 0x0.
		 */
		if (cmd->data_size > hw->nvmupd_features.size)
			i40e_memset(bytes + hw->nvmupd_features.size, 0x0,
				    cmd->data_size - hw->nvmupd_features.size,
				    I40E_NONDMA_MEM);

		i40e_memcpy(bytes, &hw->nvmupd_features,
			    hw->nvmupd_features.size, I40E_NONDMA_MEM);

		return I40E_SUCCESS;
	}

	/* Clear status even it is not read and log */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* Acquire lock to prevent race condition where adminq_task
	 * can execute after i40e_nvmupd_nvm_read/write but before state
	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
	 *
	 * During NVMUpdate, it is observed that lock could be held for
	 * ~5ms for most commands. However lock is held for ~60ms for
	 * NVMUPD_CSUM_LCB command.
	 */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_clear_wait_state(hw);
			status = I40E_SUCCESS;
			break;
		}

		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return status;
}
1047
1048 /**
1049 * i40e_nvmupd_state_init - Handle NVM update state Init
1050 * @hw: pointer to hardware structure
1051 * @cmd: pointer to nvm update command buffer
1052 * @bytes: pointer to the data buffer
1053 * @perrno: pointer to return error code
1054 *
1055 * Process legitimate commands of the Init state and conditionally set next
1056 * state. Reject all other commands.
1057 **/
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.  Each case acquires NVM ownership,
 * performs the operation, and either releases ownership immediately (on
 * failure or single-shot commands) or keeps it and advances the state
 * machine to a WAIT/READING state.
 **/
static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_state_init");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* single-shot read: acquire, read, release */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		/* start of a multi-read: keep ownership on success and
		 * move to the READING state
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		/* erase completes asynchronously; release the NVM only
		 * when the AQ completion event arrives
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = TRUE;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		/* single-shot write; NVM released on AQ completion */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = TRUE;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		/* start of a multi-write: ownership is held across the
		 * whole transaction, so no release_on_done here
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		/* standalone checksum update */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
					   i40e_aq_rc_to_posix(status,
						    hw->aq.asq_last_status) :
					   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = TRUE;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_EVENT:
		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}
1188
1189 /**
1190 * i40e_nvmupd_state_reading - Handle NVM update state Reading
1191 * @hw: pointer to hardware structure
1192 * @cmd: pointer to nvm update command buffer
1193 * @bytes: pointer to the data buffer
1194 * @perrno: pointer to return error code
1195 *
1196 * NVM ownership is already held. Process legitimate commands and set any
1197 * change in state; reject all other commands.
1198 **/
1199 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1200 struct i40e_nvm_access *cmd,
1201 u8 *bytes, int *perrno)
1202 {
1203 enum i40e_status_code status = I40E_SUCCESS;
1204 enum i40e_nvmupd_cmd upd_cmd;
1205
1206 DEBUGFUNC("i40e_nvmupd_state_reading");
1207
1208 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1209
1210 switch (upd_cmd) {
1211 case I40E_NVMUPD_READ_SA:
1212 case I40E_NVMUPD_READ_CON:
1213 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1214 break;
1215
1216 case I40E_NVMUPD_READ_LCB:
1217 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1218 i40e_release_nvm(hw);
1219 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1220 break;
1221
1222 default:
1223 i40e_debug(hw, I40E_DEBUG_NVM,
1224 "NVMUPD: bad cmd %s in reading state.\n",
1225 i40e_nvm_update_state_str[upd_cmd]);
1226 status = I40E_NOT_SUPPORTED;
1227 *perrno = -ESRCH;
1228 break;
1229 }
1230 return status;
1231 }
1232
1233 /**
1234 * i40e_nvmupd_state_writing - Handle NVM update state Writing
1235 * @hw: pointer to hardware structure
1236 * @cmd: pointer to nvm update command buffer
1237 * @bytes: pointer to the data buffer
1238 * @perrno: pointer to return error code
1239 *
1240 * NVM ownership is already held. Process legitimate commands and set any
1241 * change in state; reject all other commands
1242 **/
1243 static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
1244 struct i40e_nvm_access *cmd,
1245 u8 *bytes, int *perrno)
1246 {
1247 enum i40e_status_code status = I40E_SUCCESS;
1248 enum i40e_nvmupd_cmd upd_cmd;
1249 bool retry_attempt = FALSE;
1250
1251 DEBUGFUNC("i40e_nvmupd_state_writing");
1252
1253 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1254
1255 retry:
1256 switch (upd_cmd) {
1257 case I40E_NVMUPD_WRITE_CON:
1258 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1259 if (!status) {
1260 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1261 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1262 }
1263 break;
1264
1265 case I40E_NVMUPD_WRITE_LCB:
1266 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1267 if (status) {
1268 *perrno = hw->aq.asq_last_status ?
1269 i40e_aq_rc_to_posix(status,
1270 hw->aq.asq_last_status) :
1271 -EIO;
1272 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1273 } else {
1274 hw->nvm_release_on_done = TRUE;
1275 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1276 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1277 }
1278 break;
1279
1280 case I40E_NVMUPD_CSUM_CON:
1281 /* Assumes the caller has acquired the nvm */
1282 status = i40e_update_nvm_checksum(hw);
1283 if (status) {
1284 *perrno = hw->aq.asq_last_status ?
1285 i40e_aq_rc_to_posix(status,
1286 hw->aq.asq_last_status) :
1287 -EIO;
1288 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1289 } else {
1290 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1291 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1292 }
1293 break;
1294
1295 case I40E_NVMUPD_CSUM_LCB:
1296 /* Assumes the caller has acquired the nvm */
1297 status = i40e_update_nvm_checksum(hw);
1298 if (status) {
1299 *perrno = hw->aq.asq_last_status ?
1300 i40e_aq_rc_to_posix(status,
1301 hw->aq.asq_last_status) :
1302 -EIO;
1303 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1304 } else {
1305 hw->nvm_release_on_done = TRUE;
1306 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1307 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1308 }
1309 break;
1310
1311 default:
1312 i40e_debug(hw, I40E_DEBUG_NVM,
1313 "NVMUPD: bad cmd %s in writing state.\n",
1314 i40e_nvm_update_state_str[upd_cmd]);
1315 status = I40E_NOT_SUPPORTED;
1316 *perrno = -ESRCH;
1317 break;
1318 }
1319
1320 /* In some circumstances, a multi-write transaction takes longer
1321 * than the default 3 minute timeout on the write semaphore. If
1322 * the write failed with an EBUSY status, this is likely the problem,
1323 * so here we try to reacquire the semaphore then retry the write.
1324 * We only do one retry, then give up.
1325 */
1326 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1327 !retry_attempt) {
1328 enum i40e_status_code old_status = status;
1329 u32 old_asq_status = hw->aq.asq_last_status;
1330 u32 gtime;
1331
1332 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1333 if (gtime >= hw->nvm.hw_semaphore_timeout) {
1334 i40e_debug(hw, I40E_DEBUG_ALL,
1335 "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1336 gtime, hw->nvm.hw_semaphore_timeout);
1337 i40e_release_nvm(hw);
1338 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1339 if (status) {
1340 i40e_debug(hw, I40E_DEBUG_ALL,
1341 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1342 hw->aq.asq_last_status);
1343 status = old_status;
1344 hw->aq.asq_last_status = old_asq_status;
1345 } else {
1346 retry_attempt = TRUE;
1347 goto retry;
1348 }
1349 }
1350 }
1351
1352 return status;
1353 }
1354
1355 /**
1356 * i40e_nvmupd_clear_wait_state - clear wait state on hw
1357 * @hw: pointer to the hardware structure
1358 **/
1359 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1360 {
1361 i40e_debug(hw, I40E_DEBUG_NVM,
1362 "NVMUPD: clearing wait on opcode 0x%04x\n",
1363 hw->nvm_wait_opcode);
1364
1365 if (hw->nvm_release_on_done) {
1366 i40e_release_nvm(hw);
1367 hw->nvm_release_on_done = FALSE;
1368 }
1369 hw->nvm_wait_opcode = 0;
1370
1371 if (hw->aq.arq_last_status) {
1372 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1373 return;
1374 }
1375
1376 switch (hw->nvmupd_state) {
1377 case I40E_NVMUPD_STATE_INIT_WAIT:
1378 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1379 break;
1380
1381 case I40E_NVMUPD_STATE_WRITE_WAIT:
1382 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1383 break;
1384
1385 default:
1386 break;
1387 }
1388 }
1389
1390 /**
1391 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1392 * @hw: pointer to the hardware structure
1393 * @opcode: the event that just happened
1394 * @desc: AdminQ descriptor
1395 **/
1396 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1397 struct i40e_aq_desc *desc)
1398 {
1399 u32 aq_desc_len = sizeof(struct i40e_aq_desc);
1400
1401 if (opcode == hw->nvm_wait_opcode) {
1402 i40e_memcpy(&hw->nvm_aq_event_desc, desc,
1403 aq_desc_len, I40E_NONDMA_TO_NONDMA);
1404 i40e_nvmupd_clear_wait_state(hw);
1405 }
1406 }
1407
1408 /**
1409 * i40e_nvmupd_validate_command - Validate given command
1410 * @hw: pointer to hardware structure
1411 * @cmd: pointer to nvm update command buffer
1412 * @perrno: pointer to return error code
1413 *
1414 * Return one of the valid command types or I40E_NVMUPD_INVALID
1415 **/
1416 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1417 struct i40e_nvm_access *cmd,
1418 int *perrno)
1419 {
1420 enum i40e_nvmupd_cmd upd_cmd;
1421 u8 module, transaction;
1422
1423 DEBUGFUNC("i40e_nvmupd_validate_command\n");
1424
1425 /* anything that doesn't match a recognized case is an error */
1426 upd_cmd = I40E_NVMUPD_INVALID;
1427
1428 transaction = i40e_nvmupd_get_transaction(cmd->config);
1429 module = i40e_nvmupd_get_module(cmd->config);
1430
1431 /* limits on data size */
1432 if ((cmd->data_size < 1) ||
1433 (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1434 i40e_debug(hw, I40E_DEBUG_NVM,
1435 "i40e_nvmupd_validate_command data_size %d\n",
1436 cmd->data_size);
1437 *perrno = -EFAULT;
1438 return I40E_NVMUPD_INVALID;
1439 }
1440
1441 switch (cmd->command) {
1442 case I40E_NVM_READ:
1443 switch (transaction) {
1444 case I40E_NVM_CON:
1445 upd_cmd = I40E_NVMUPD_READ_CON;
1446 break;
1447 case I40E_NVM_SNT:
1448 upd_cmd = I40E_NVMUPD_READ_SNT;
1449 break;
1450 case I40E_NVM_LCB:
1451 upd_cmd = I40E_NVMUPD_READ_LCB;
1452 break;
1453 case I40E_NVM_SA:
1454 upd_cmd = I40E_NVMUPD_READ_SA;
1455 break;
1456 case I40E_NVM_EXEC:
1457 switch (module) {
1458 case I40E_NVM_EXEC_GET_AQ_RESULT:
1459 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1460 break;
1461 case I40E_NVM_EXEC_FEATURES:
1462 upd_cmd = I40E_NVMUPD_FEATURES;
1463 break;
1464 case I40E_NVM_EXEC_STATUS:
1465 upd_cmd = I40E_NVMUPD_STATUS;
1466 break;
1467 default:
1468 *perrno = -EFAULT;
1469 return I40E_NVMUPD_INVALID;
1470 }
1471 break;
1472 case I40E_NVM_AQE:
1473 upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
1474 break;
1475 }
1476 break;
1477
1478 case I40E_NVM_WRITE:
1479 switch (transaction) {
1480 case I40E_NVM_CON:
1481 upd_cmd = I40E_NVMUPD_WRITE_CON;
1482 break;
1483 case I40E_NVM_SNT:
1484 upd_cmd = I40E_NVMUPD_WRITE_SNT;
1485 break;
1486 case I40E_NVM_LCB:
1487 upd_cmd = I40E_NVMUPD_WRITE_LCB;
1488 break;
1489 case I40E_NVM_SA:
1490 upd_cmd = I40E_NVMUPD_WRITE_SA;
1491 break;
1492 case I40E_NVM_ERA:
1493 upd_cmd = I40E_NVMUPD_WRITE_ERA;
1494 break;
1495 case I40E_NVM_CSUM:
1496 upd_cmd = I40E_NVMUPD_CSUM_CON;
1497 break;
1498 case (I40E_NVM_CSUM|I40E_NVM_SA):
1499 upd_cmd = I40E_NVMUPD_CSUM_SA;
1500 break;
1501 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1502 upd_cmd = I40E_NVMUPD_CSUM_LCB;
1503 break;
1504 case I40E_NVM_EXEC:
1505 if (module == 0)
1506 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1507 break;
1508 }
1509 break;
1510 }
1511
1512 return upd_cmd;
1513 }
1514
1515 /**
1516 * i40e_nvmupd_exec_aq - Run an AQ command
1517 * @hw: pointer to hardware structure
1518 * @cmd: pointer to nvm update command buffer
1519 * @bytes: pointer to the data buffer
1520 * @perrno: pointer to return error code
1521 *
1522 * cmd structure contains identifiers and data buffer
1523 **/
/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer.  The caller's
 * buffer carries an i40e_aq_desc followed by optional command data;
 * cmd->offset, if nonzero, names the AQ event opcode to wait for after
 * the command is sent (0xffff means "do nothing").
 **/
static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
						 struct i40e_nvm_access *cmd,
						 u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	enum i40e_status_code status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	/* offset 0xffff is the "cancel wait" sentinel; nothing to send */
	if (cmd->offset == 0xffff)
		return I40E_SUCCESS;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
	if (buff_size) {
		/* the bounce buffer is allocated once and reused; note a
		 * failed allocation only logs -- the command is then sent
		 * with a NULL buffer below.
		 * NOTE(review): buff_size is not checked against the
		 * hw->aq.asq_buf_size allocation -- confirm callers cannot
		 * supply a descriptor datalen larger than that.
		 */
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
				    I40E_NONDMA_TO_NONDMA);
		}
	}

	/* clear any stale event descriptor if we will wait for a new one */
	if (cmd->offset)
		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
		return status;
	}

	/* should we wait for a followup event? */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}
1599
1600 /**
1601 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1602 * @hw: pointer to hardware structure
1603 * @cmd: pointer to nvm update command buffer
1604 * @bytes: pointer to the data buffer
1605 * @perrno: pointer to return error code
1606 *
1607 * cmd structure contains identifiers and data buffer
1608 **/
/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer.  The result is the
 * writeback descriptor (hw->nvm_wb_desc) followed by the data buffer
 * (hw->nvm_buff); cmd->offset selects where in that concatenation to
 * start copying, and cmd->data_size is clamped to what is available.
 **/
static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		/* the copy starts inside the descriptor; take as much of
		 * it as requested, then fall through to the data buffer
		 */
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		/* the copy starts inside the data buffer */
		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
	}

	return I40E_SUCCESS;
}
1668
1669 /**
1670 * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
1671 * @hw: pointer to hardware structure
1672 * @cmd: pointer to nvm update command buffer
1673 * @bytes: pointer to the data buffer
1674 * @perrno: pointer to return error code
1675 *
1676 * cmd structure contains identifiers and data buffer
1677 **/
1678 static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1679 struct i40e_nvm_access *cmd,
1680 u8 *bytes, int *perrno)
1681 {
1682 u32 aq_total_len;
1683 u32 aq_desc_len;
1684
1685 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1686
1687 aq_desc_len = sizeof(struct i40e_aq_desc);
1688 aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
1689
1690 /* check copylength range */
1691 if (cmd->data_size > aq_total_len) {
1692 i40e_debug(hw, I40E_DEBUG_NVM,
1693 "%s: copy length %d too big, trimming to %d\n",
1694 __func__, cmd->data_size, aq_total_len);
1695 cmd->data_size = aq_total_len;
1696 }
1697
1698 i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
1699 I40E_NONDMA_TO_NONDMA);
1700
1701 return I40E_SUCCESS;
1702 }
1703
1704 /**
1705 * i40e_nvmupd_nvm_read - Read NVM
1706 * @hw: pointer to hardware structure
1707 * @cmd: pointer to nvm update command buffer
1708 * @bytes: pointer to the data buffer
1709 * @perrno: pointer to return error code
1710 *
1711 * cmd structure contains identifiers and data buffer
1712 **/
1713 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1714 struct i40e_nvm_access *cmd,
1715 u8 *bytes, int *perrno)
1716 {
1717 struct i40e_asq_cmd_details cmd_details;
1718 enum i40e_status_code status;
1719 u8 module, transaction;
1720 bool last;
1721
1722 transaction = i40e_nvmupd_get_transaction(cmd->config);
1723 module = i40e_nvmupd_get_module(cmd->config);
1724 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1725
1726 memset(&cmd_details, 0, sizeof(cmd_details));
1727 cmd_details.wb_desc = &hw->nvm_wb_desc;
1728
1729 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1730 bytes, last, &cmd_details);
1731 if (status) {
1732 i40e_debug(hw, I40E_DEBUG_NVM,
1733 "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1734 module, cmd->offset, cmd->data_size);
1735 i40e_debug(hw, I40E_DEBUG_NVM,
1736 "i40e_nvmupd_nvm_read status %d aq %d\n",
1737 status, hw->aq.asq_last_status);
1738 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1739 }
1740
1741 return status;
1742 }
1743
1744 /**
1745 * i40e_nvmupd_nvm_erase - Erase an NVM module
1746 * @hw: pointer to hardware structure
1747 * @cmd: pointer to nvm update command buffer
1748 * @perrno: pointer to return error code
1749 *
1750 * module, offset, data_size and data are in cmd structure
1751 **/
1752 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1753 struct i40e_nvm_access *cmd,
1754 int *perrno)
1755 {
1756 enum i40e_status_code status = I40E_SUCCESS;
1757 struct i40e_asq_cmd_details cmd_details;
1758 u8 module, transaction;
1759 bool last;
1760
1761 transaction = i40e_nvmupd_get_transaction(cmd->config);
1762 module = i40e_nvmupd_get_module(cmd->config);
1763 last = (transaction & I40E_NVM_LCB);
1764
1765 memset(&cmd_details, 0, sizeof(cmd_details));
1766 cmd_details.wb_desc = &hw->nvm_wb_desc;
1767
1768 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1769 last, &cmd_details);
1770 if (status) {
1771 i40e_debug(hw, I40E_DEBUG_NVM,
1772 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1773 module, cmd->offset, cmd->data_size);
1774 i40e_debug(hw, I40E_DEBUG_NVM,
1775 "i40e_nvmupd_nvm_erase status %d aq %d\n",
1776 status, hw->aq.asq_last_status);
1777 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1778 }
1779
1780 return status;
1781 }
1782
1783 /**
1784 * i40e_nvmupd_nvm_write - Write NVM
1785 * @hw: pointer to hardware structure
1786 * @cmd: pointer to nvm update command buffer
1787 * @bytes: pointer to the data buffer
1788 * @perrno: pointer to return error code
1789 *
1790 * module, offset, data_size and data are in cmd structure
1791 **/
1792 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1793 struct i40e_nvm_access *cmd,
1794 u8 *bytes, int *perrno)
1795 {
1796 enum i40e_status_code status = I40E_SUCCESS;
1797 struct i40e_asq_cmd_details cmd_details;
1798 u8 module, transaction;
1799 u8 preservation_flags;
1800 bool last;
1801
1802 transaction = i40e_nvmupd_get_transaction(cmd->config);
1803 module = i40e_nvmupd_get_module(cmd->config);
1804 last = (transaction & I40E_NVM_LCB);
1805 preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1806
1807 memset(&cmd_details, 0, sizeof(cmd_details));
1808 cmd_details.wb_desc = &hw->nvm_wb_desc;
1809
1810 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1811 (u16)cmd->data_size, bytes, last,
1812 preservation_flags, &cmd_details);
1813 if (status) {
1814 i40e_debug(hw, I40E_DEBUG_NVM,
1815 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1816 module, cmd->offset, cmd->data_size);
1817 i40e_debug(hw, I40E_DEBUG_NVM,
1818 "i40e_nvmupd_nvm_write status %d aq %d\n",
1819 status, hw->aq.asq_last_status);
1820 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1821 }
1822
1823 return status;
1824 }
Cache object: 89ea7b4172080beee4c96bdfad504656
|