Commit | Line | Data |
---|---|---|
afd4aea0 BH |
1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | |
0a6f40c6 | 3 | * Copyright 2008-2011 Solarflare Communications Inc. |
afd4aea0 BH |
4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License version 2 as published | |
7 | * by the Free Software Foundation, incorporated herein by reference. | |
8 | */ | |
9 | ||
10 | #include <linux/delay.h> | |
11 | #include "net_driver.h" | |
12 | #include "nic.h" | |
13 | #include "io.h" | |
8b8a95a1 | 14 | #include "farch_regs.h" |
afd4aea0 BH |
15 | #include "mcdi_pcol.h" |
16 | #include "phy.h" | |
17 | ||
18 | /************************************************************************** | |
19 | * | |
20 | * Management-Controller-to-Driver Interface | |
21 | * | |
22 | ************************************************************************** | |
23 | */ | |
24 | ||
ebf98e79 | 25 | #define MCDI_RPC_TIMEOUT (10 * HZ) |
afd4aea0 | 26 | |
3f713bf4 BH |
27 | /* A reboot/assertion causes the MCDI status word to be set after the |
28 | * command word is set or a REBOOT event is sent. If we notice a reboot | |
29 | * via these mechanisms then wait 10ms for the status word to be set. */ | |
30 | #define MCDI_STATUS_DELAY_US 100 | |
31 | #define MCDI_STATUS_DELAY_COUNT 100 | |
32 | #define MCDI_STATUS_SLEEP_MS \ | |
33 | (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) | |
afd4aea0 BH |
34 | |
35 | #define SEQ_MASK \ | |
36 | EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) | |
37 | ||
/* Return the MCDI interface state embedded in @efx->mcdi.
 * The MCDI state must already have been allocated by efx_mcdi_init().
 */
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
	EFX_BUG_ON_PARANOID(!efx->mcdi);
	return &efx->mcdi->iface;
}
43 | ||
/* Allocate and initialise the MCDI state for @efx.
 * Starts in polled mode (event completion is enabled later via
 * efx_mcdi_mode_event()), clears any pending MC-reboot flag, and
 * recovers from any firmware assertion left over from before probe.
 * Returns 0 or a negative errno.
 */
int efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
	if (!efx->mcdi)
		return -ENOMEM;

	mcdi = efx_mcdi(efx);
	init_waitqueue_head(&mcdi->wq);
	spin_lock_init(&mcdi->iface_lock);
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	mcdi->mode = MCDI_MODE_POLL;

	/* Discard any stale MC-reboot indication; return value ignored
	 * deliberately since we only want the flag cleared here. */
	(void) efx_mcdi_poll_reboot(efx);

	/* Recover from a failed assertion before probing */
	return efx_mcdi_handle_assertion(efx);
}
63 | ||
/* Free the MCDI state allocated by efx_mcdi_init().
 * Must not be called while an MCDI request is in flight (state must
 * be QUIESCENT); safe if efx_mcdi_init() was never called or failed.
 */
void efx_mcdi_fini(struct efx_nic *efx)
{
	BUG_ON(efx->mcdi &&
	       atomic_read(&efx->mcdi->iface.state) != MCDI_STATE_QUIESCENT);
	kfree(efx->mcdi);
}
70 | ||
/* Build the MCDI request header(s) for @cmd and hand the request to
 * the NIC-type-specific transport.  The caller must already own the
 * interface (state != QUIESCENT) and have advanced mcdi->seqno.
 * For MCDI v1 a single header dword carries cmd/len/seq; for v2 the
 * real command and length go in a second, extended header dword.
 */
static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
			    const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	/* Ask the MC to complete via an event rather than polling */
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1 */
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		hdr_len = 4;
	} else {
		/* MCDI v2: the v1 header fields only signal "extended
		 * command follows"; cmd and inlen live in hdr[1]. */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
}
114 | ||
5bc283e5 BH |
115 | static int efx_mcdi_errno(unsigned int mcdi_err) |
116 | { | |
117 | switch (mcdi_err) { | |
118 | case 0: | |
119 | return 0; | |
120 | #define TRANSLATE_ERROR(name) \ | |
121 | case MC_CMD_ERR_ ## name: \ | |
122 | return -name; | |
df2cd8af | 123 | TRANSLATE_ERROR(EPERM); |
5bc283e5 BH |
124 | TRANSLATE_ERROR(ENOENT); |
125 | TRANSLATE_ERROR(EINTR); | |
df2cd8af | 126 | TRANSLATE_ERROR(EAGAIN); |
5bc283e5 BH |
127 | TRANSLATE_ERROR(EACCES); |
128 | TRANSLATE_ERROR(EBUSY); | |
129 | TRANSLATE_ERROR(EINVAL); | |
130 | TRANSLATE_ERROR(EDEADLK); | |
131 | TRANSLATE_ERROR(ENOSYS); | |
132 | TRANSLATE_ERROR(ETIME); | |
df2cd8af BH |
133 | TRANSLATE_ERROR(EALREADY); |
134 | TRANSLATE_ERROR(ENOSPC); | |
5bc283e5 | 135 | #undef TRANSLATE_ERROR |
df2cd8af BH |
136 | case MC_CMD_ERR_ALLOC_FAIL: |
137 | return -ENOBUFS; | |
138 | case MC_CMD_ERR_MAC_EXIST: | |
139 | return -EADDRINUSE; | |
5bc283e5 | 140 | default: |
df2cd8af BH |
141 | return -EPROTO; |
142 | } | |
143 | } | |
144 | ||
/* Parse the response header from shared memory and record the result
 * (resprc, resp_hdr_len, resp_data_len) in the MCDI state.
 * Caller must hold mcdi->iface_lock (or otherwise serialise access
 * to the resp* fields).  An error with zero response length is taken
 * to mean the MC rebooted mid-command.
 */
static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		/* MCDI v1: length is in the first header dword */
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		/* MCDI v2: actual length is in the extended header */
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && mcdi->resp_data_len == 0) {
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		/* Response does not match the request we sent */
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		/* First dword of the payload is the MCDI error code */
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc =
			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
	} else {
		mcdi->resprc = 0;
	}
}
182 | ||
afd4aea0 BH |
/* Busy-wait for completion of the current MCDI request.
 * Returns 0 on completion (matching wait_event_timeout()'s "done"
 * convention — the actual command result is left in mcdi->resprc)
 * or -ETIMEDOUT if the MC does not respond within MCDI_RPC_TIMEOUT.
 */
static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		/* The MC rebooted: report that as the command result */
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
	 * because generally mcdi responses are fast. After that, back off
	 * and poll once a jiffy (approximately)
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		/* Order the completion check after the delay */
		rmb();
		if (efx->type->mcdi_poll_response(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	/* Completed: decode the response header under the lock */
	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}
233 | ||
876be083 BH |
234 | /* Test and clear MC-rebooted flag for this port/function; reset |
235 | * software state as necessary. | |
236 | */ | |
afd4aea0 BH |
237 | int efx_mcdi_poll_reboot(struct efx_nic *efx) |
238 | { | |
f3ad5003 BH |
239 | if (!efx->mcdi) |
240 | return 0; | |
afd4aea0 | 241 | |
cd0ecc9a | 242 | return efx->type->mcdi_poll_reboot(efx); |
afd4aea0 BH |
243 | } |
244 | ||
/* Acquire exclusive use of the MCDI interface, sleeping if necessary.
 * Multiple requestors may race; atomic_cmpxchg() ensures exactly one
 * wins the QUIESCENT -> RUNNING transition.
 */
static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING. */
	wait_event(mcdi->wq,
		   atomic_cmpxchg(&mcdi->state,
				  MCDI_STATE_QUIESCENT,
				  MCDI_STATE_RUNNING)
		   == MCDI_STATE_QUIESCENT);
}
255 | ||
/* Sleep until the current MCDI request completes (event mode), or
 * fall back to polling if the mode was switched underneath us.
 * Returns 0 on completion, -ETIMEDOUT after MCDI_RPC_TIMEOUT.
 */
static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(
		    mcdi->wq,
		    atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
		    MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
	 * completed the request first, then we'll just end up completing the
	 * request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}
279 | ||
/* Mark the current request complete and wake waiters.
 * Returns true if we performed the RUNNING -> COMPLETED transition,
 * false if the completion was a duplicate (interface not RUNNING).
 */
static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
{
	/* If the interface is RUNNING, then move to COMPLETED and wake any
	 * waiters. If the interface isn't in RUNNING then we've received a
	 * duplicate completion after we've already transitioned back to
	 * QUIESCENT. [A subsequent invocation would increment seqno, so would
	 * have failed the seqno check].
	 */
	if (atomic_cmpxchg(&mcdi->state,
			   MCDI_STATE_RUNNING,
			   MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}
297 | ||
/* Return the interface to QUIESCENT and wake anyone waiting to
 * acquire it (see efx_mcdi_acquire()).
 */
static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	wake_up(&mcdi->wq);
}
303 | ||
/* Handle a CMDDONE completion event from the MC.
 * @seqno/@datalen/@mcdi_err come from the event; a sequence-number
 * mismatch means either a cancelled request (consume a credit) or a
 * genuine protocol error.  Called from event-queue context.
 */
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	/* Wake outside the lock; efx_mcdi_complete() handles duplicates */
	if (wake)
		efx_mcdi_complete(mcdi);
}
338 | ||
afd4aea0 | 339 | int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, |
9528b921 BH |
340 | const efx_dword_t *inbuf, size_t inlen, |
341 | efx_dword_t *outbuf, size_t outlen, | |
afd4aea0 | 342 | size_t *outlen_actual) |
c3cba721 | 343 | { |
df2cd8af BH |
344 | int rc; |
345 | ||
346 | rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); | |
347 | if (rc) | |
348 | return rc; | |
c3cba721 SH |
349 | return efx_mcdi_rpc_finish(efx, cmd, inlen, |
350 | outbuf, outlen, outlen_actual); | |
351 | } | |
352 | ||
/* Validate and issue an MCDI request without waiting for completion.
 * Rejects commands/lengths the NIC's MCDI version cannot carry, then
 * acquires the interface, bumps the sequence number and copies the
 * request in.  Pair with efx_mcdi_rpc_finish().
 * Returns 0, -EINVAL (unsupported command) or -EMSGSIZE.
 */
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* v1 firmware cannot address commands beyond the escape range */
	if (efx->type->mcdi_max_ver < 0 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
		return -EINVAL;

	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
		return -EMSGSIZE;

	efx_mcdi_acquire(mcdi);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	efx_mcdi_copyin(efx, cmd, inbuf, inlen);
	return 0;
}
378 | ||
/* Wait for a request started by efx_mcdi_rpc_start() to complete and
 * copy out up to @outlen bytes of response into @outbuf.
 * On timeout the request is cancelled (seqno bumped, credit added) so
 * a late completion is discarded by efx_mcdi_ev_cpl().  On MC fatal
 * errors (-EIO/-EINTR) a reset is scheduled (except for MC_CMD_REBOOT,
 * where EIO is the expected outcome) and the status word is consumed.
 * Always releases the interface.  Returns 0 or negative errno.
 */
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);

		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);
	} else {
		size_t hdr_len, data_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock. */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		spin_unlock_bh(&mcdi->iface_lock);

		/* resprc is 0 or a negative errno */
		BUG_ON(rc > 0);

		if (rc == 0) {
			efx->type->mcdi_read_response(efx, outbuf, hdr_len,
						      min(outlen, data_len));
			if (outlen_actual != NULL)
				*outlen_actual = data_len;
		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
			; /* Don't reset if MC_CMD_REBOOT returns EIO */
		else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else
			netif_dbg(efx, hw, efx->net_dev,
				  "MC command 0x%x inlen %d failed rc=%d\n",
				  cmd, (int)inlen, -rc);

		if (rc == -EIO || rc == -EINTR) {
			/* Give the MC time to post its status word, then
			 * clear the reboot flag for this function */
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
		}
	}

	efx_mcdi_release(mcdi);
	return rc;
}
444 | ||
/* Switch MCDI completions to polled mode.
 * Safe to call at any time, including while a request is in flight;
 * no-op if MCDI is not initialised or already in polled mode.
 */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete(mcdi);
}
468 | ||
/* Switch MCDI completions to event mode.
 * Unlike efx_mcdi_mode_poll() this must wait for any in-flight
 * request, since the completion method is baked into each request.
 * No-op if MCDI is not initialised or already in event mode.
 */
void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}
492 | ||
/* Handle a BADASSERT or REBOOT event: terminate any outstanding MCDI
 * request with @rc (a negative errno), or — if nothing was waiting —
 * schedule an MC-failure reset and consume the status word ourselves.
 * Called from event-queue context.
 */
static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * There's a race here with efx_mcdi_rpc(), because we might receive
	 * a REBOOT event *before* the request has been copied out. In polled
	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
	 * is ignored. In event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI request. Did the mc
	 * reboot before or after the copyout? The best we can do always is
	 * just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			/* Fail the waiter and cancel the request so a late
			 * CMDDONE event is discarded (see credits check in
			 * efx_mcdi_ev_cpl()) */
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
	}

	spin_unlock(&mcdi->iface_lock);
}
537 | ||
afd4aea0 BH |
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		/* Firmware assertion/watchdog: fail any waiter with -EINTR */
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		/* Command completion in event mode */
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		/* MC reboot: fail any waiter with -EIO */
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gather lazily.  We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;

	case MCDI_EVENT_CODE_TX_ERR:
	case MCDI_EVENT_CODE_RX_ERR:
		netif_err(efx, hw, efx->net_dev,
			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
			  EFX_QWORD_VAL(*event));
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}
603 | ||
604 | /************************************************************************** | |
605 | * | |
606 | * Specific request functions | |
607 | * | |
608 | ************************************************************************** | |
609 | */ | |
610 | ||
/* Format the firmware version as "a.b.c.d" into @buf (size @len).
 * On any failure the error is logged and @buf is set to the empty
 * string; no error is returned to the caller.
 */
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
	size_t outlength;
	const __le16 *ver_words;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;

	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	/* Version is four little-endian 16-bit words */
	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
	snprintf(buf, len, "%u.%u.%u.%u",
		 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
		 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
	return;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	buf[0] = 0;
}
640 | ||
/* Tell the MC whether the driver is attached to this function.
 * If @was_attached is non-NULL it receives the previous attach state.
 * Returns 0 or a negative errno.
 */
int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			bool *was_attached)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
		       driver_operating ? 1 : 0);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);

	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (was_attached != NULL)
		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
	return 0;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
671 | ||
/* Read board configuration from the MC.
 * Any of @mac_address (ETH_ALEN bytes), @fw_subtype_list
 * (MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM entries; trailing
 * entries zero-filled) and @capabilities may be NULL to skip that
 * output.  Values are selected for this port.  Returns 0 or errno.
 */
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	if (mac_address)
		memcpy(mac_address,
		       port_num ?
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
		       ETH_ALEN);
	if (fw_subtype_list) {
		/* Copy what the MC returned; zero the remainder so the
		 * caller always sees a fully-initialised array */
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}
725 | ||
/* Configure where the MC sends its log output: to the UART and/or to
 * event queue @dest_evq.  Returns 0 or a negative errno.
 */
int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
	u32 dest = 0;
	int rc;

	if (uart)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
	if (evq)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;

	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);

	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
753 | ||
/* Query the bitmask of NVRAM partition types supported by the MC,
 * stored in *@nvram_types_out.  Returns 0 or a negative errno.
 */
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
		  __func__, rc);
	return rc;
}
779 | ||
/* Query size, erase granularity and write-protection of NVRAM
 * partition @type.  All three output pointers must be non-NULL.
 * Returns 0 or a negative errno.
 */
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			size_t *size_out, size_t *erase_size_out,
			bool *protected_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
			    (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
810 | ||
2e803407 BH |
/* Ask the MC to self-test NVRAM partition @type.
 * PASS and NOTSUPP both count as success; any other result is -EIO.
 * Returns 0, -EIO, or the errno from the RPC itself.
 */
static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
	case MC_CMD_NVRAM_TEST_PASS:
	case MC_CMD_NVRAM_TEST_NOTSUPP:
		return 0;
	default:
		return -EIO;
	}
}
832 | ||
/* Self-test every NVRAM partition type the MC reports as present.
 * Stops and logs the failing type on the first failure.
 * Returns 0 or a negative errno.
 */
int efx_mcdi_nvram_test_all(struct efx_nic *efx)
{
	u32 nvram_types;
	unsigned int type;
	int rc;

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		goto fail1;

	/* Walk the bitmask, testing each set bit's partition type */
	type = 0;
	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = efx_mcdi_nvram_test(efx, type);
			if (rc)
				goto fail2;
		}
		type++;
		nvram_types >>= 1;
	}

	return 0;

fail2:
	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
		  __func__, type);
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
863 | ||
/* Read (and clear) any assertion state stored by the MC firmware, and
 * log it.  Returns 0 if there was no assertion or it was logged
 * successfully; a negative error code if the state could not be read.
 */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	/* Translate the failure-type flag into a human-readable reason. */
	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 0;
}
920 | ||
8b2103ad SH |
/* Reboot the MC out of its assertion handler.  The return code of the
 * REBOOT command is deliberately ignored (see comment below).
 */
static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot. We set a flag that makes the command a no-op if it
	 * has already done so. We don't know what return code to
	 * expect (0 or -EIO), so ignore it.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
			    NULL, 0, NULL);
}
937 | ||
/* Recover from any stored MC assertion: log its state, then reboot the
 * MC out of the assertion handler.  Returns 0 on success or the error
 * from reading the assertion state.
 */
int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc = efx_mcdi_read_assertion(efx);

	if (rc == 0)
		efx_mcdi_exit_assertion(efx);
	return rc;
}
950 | ||
afd4aea0 BH |
/* Set the board's identification LED state via MC_CMD_SET_ID_LED.
 * @mode: EFX_LED_OFF / EFX_LED_ON / EFX_LED_DEFAULT.
 * Failures are logged but not propagated (best-effort operation).
 */
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
	int rc;

	/* The driver's enum values must match the MCDI protocol values,
	 * since 'mode' is passed through unconverted. */
	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
}
970 | ||
6bff861d | 971 | static int efx_mcdi_reset_port(struct efx_nic *efx) |
afd4aea0 | 972 | { |
05a9320f | 973 | int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL); |
afd4aea0 | 974 | if (rc) |
62776d03 BH |
975 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
976 | __func__, rc); | |
afd4aea0 BH |
977 | return rc; |
978 | } | |
979 | ||
/* Reboot the MC itself.  Because the MC goes away mid-command, the RPC
 * is expected to fail with -EIO; success semantics are inverted below.
 * Returns 0 if the MC rebooted as expected, -EIO (or another error)
 * otherwise.
 */
static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
997 | ||
6bff861d BH |
/* Map a reset reason onto the reset method to request.  For MCDI NICs
 * every reason maps to RESET_TYPE_RECOVER_OR_ALL; the argument is
 * currently unused.
 */
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}
1002 | ||
1003 | int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method) | |
1004 | { | |
1005 | int rc; | |
1006 | ||
1007 | /* Recover from a failed assertion pre-reset */ | |
1008 | rc = efx_mcdi_handle_assertion(efx); | |
1009 | if (rc) | |
1010 | return rc; | |
1011 | ||
1012 | if (method == RESET_TYPE_WORLD) | |
1013 | return efx_mcdi_reset_mc(efx); | |
1014 | else | |
1015 | return efx_mcdi_reset_port(efx); | |
1016 | } | |
1017 | ||
/* Install a Wake-on-LAN filter in the MC.
 * @type: MC_CMD_WOL_TYPE_* filter type.
 * @mac: MAC address the filter matches (ETH_ALEN bytes).
 * @id_out: on success, the MC-assigned filter ID; -1 on failure.
 * Returns 0 or a negative error code.
 */
static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
				   const u8 *mac, int *id_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
		       MC_CMD_FILTER_MODE_SIMPLE);
	memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	/* A short response means the MC did not return a filter ID. */
	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;

}
1051 | ||
1052 | ||
/* Install a magic-packet Wake-on-LAN filter for @mac; thin wrapper
 * around efx_mcdi_wol_filter_set() with MC_CMD_WOL_TYPE_MAGIC.
 */
int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}
1058 | ||
1059 | ||
/* Query the MC for the currently installed WoL filter.
 * @id_out: on success, the filter ID; -1 on failure.
 * Returns 0 or a negative error code.
 */
int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	/* A short response means the MC did not return a filter ID. */
	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
1085 | ||
1086 | ||
1087 | int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) | |
1088 | { | |
59cfc479 | 1089 | MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN); |
afd4aea0 BH |
1090 | int rc; |
1091 | ||
1092 | MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); | |
1093 | ||
1094 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), | |
1095 | NULL, 0, NULL); | |
1096 | if (rc) | |
1097 | goto fail; | |
1098 | ||
1099 | return 0; | |
1100 | ||
1101 | fail: | |
62776d03 | 1102 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
afd4aea0 BH |
1103 | return rc; |
1104 | } | |
1105 | ||
cd2d5b52 BH |
/* Ask the MC to flush every RX queue that has a flush pending.
 * Collects the queue indices into a single MC_CMD_FLUSH_RX_QUEUES
 * request, clearing each queue's flush_pending flag and decrementing
 * efx->rxq_flush_pending as it goes.  Returns the MCDI return code.
 */
int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	/* The request buffer is sized for one queue per channel; make
	 * sure that fits within the MCDI command's maximum. */
	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	/* Only the portion of the buffer actually filled is sent. */
	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}
afd4aea0 BH |
1137 | |
1138 | int efx_mcdi_wol_filter_reset(struct efx_nic *efx) | |
1139 | { | |
1140 | int rc; | |
1141 | ||
1142 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); | |
1143 | if (rc) | |
1144 | goto fail; | |
1145 | ||
1146 | return 0; | |
1147 | ||
1148 | fail: | |
62776d03 | 1149 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
afd4aea0 BH |
1150 | return rc; |
1151 | } | |
1152 | ||
45a3fd55 BH |
1153 | #ifdef CONFIG_SFC_MTD |
1154 | ||
1155 | #define EFX_MCDI_NVRAM_LEN_MAX 128 | |
1156 | ||
1157 | static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) | |
1158 | { | |
1159 | MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN); | |
1160 | int rc; | |
1161 | ||
1162 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); | |
1163 | ||
1164 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); | |
1165 | ||
1166 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), | |
1167 | NULL, 0, NULL); | |
1168 | if (rc) | |
1169 | goto fail; | |
1170 | ||
1171 | return 0; | |
1172 | ||
1173 | fail: | |
1174 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1175 | return rc; | |
1176 | } | |
1177 | ||
/* Read @length bytes from NVRAM partition @type at @offset into @buffer.
 * @length must not exceed EFX_MCDI_NVRAM_LEN_MAX (callers chunk reads).
 * Returns 0 on success or a negative error code (also logged).
 */
static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			       loff_t offset, u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf,
			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
1203 | ||
/* Write @length bytes from @buffer to NVRAM partition @type at @offset.
 * @length must not exceed EFX_MCDI_NVRAM_LEN_MAX (callers chunk writes).
 * Returns 0 on success or a negative error code (also logged).
 */
static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
				loff_t offset, const u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	/* The request length is rounded up to a whole number of dwords,
	 * as required by the MCDI transport. */
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
1230 | ||
1231 | static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, | |
1232 | loff_t offset, size_t length) | |
1233 | { | |
1234 | MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN); | |
1235 | int rc; | |
1236 | ||
1237 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); | |
1238 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); | |
1239 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); | |
1240 | ||
1241 | BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); | |
1242 | ||
1243 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), | |
1244 | NULL, 0, NULL); | |
1245 | if (rc) | |
1246 | goto fail; | |
1247 | ||
1248 | return 0; | |
1249 | ||
1250 | fail: | |
1251 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1252 | return rc; | |
1253 | } | |
1254 | ||
1255 | static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) | |
1256 | { | |
1257 | MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN); | |
1258 | int rc; | |
1259 | ||
1260 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); | |
1261 | ||
1262 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); | |
1263 | ||
1264 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), | |
1265 | NULL, 0, NULL); | |
1266 | if (rc) | |
1267 | goto fail; | |
1268 | ||
1269 | return 0; | |
1270 | ||
1271 | fail: | |
1272 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1273 | return rc; | |
1274 | } | |
1275 | ||
/* MTD read hook: read @len bytes at @start into @buffer in
 * EFX_MCDI_NVRAM_LEN_MAX-sized chunks.  *@retlen is set to the number
 * of bytes actually read, even on partial failure.  Returns 0 or the
 * first chunk's error code.
 */
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
		      size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	/* Clamp the read to the partition size. */
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
					 buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}
1299 | ||
/* MTD erase hook: erase the erase-blocks covering [@start, @start+@len),
 * opening the partition for update first if needed.  The partition is
 * left open; efx_mcdi_mtd_sync() finishes the update.  Returns 0 or the
 * first error code.
 */
int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	/* Round the start down to an erase-block boundary
	 * (erasesize is assumed to be a power of two). */
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->common.mtd.erasesize;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout. */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}
1329 | ||
/* MTD write hook: write @len bytes from @buffer at @start in
 * EFX_MCDI_NVRAM_LEN_MAX-sized chunks, opening the partition for update
 * first if needed.  *@retlen is set to the number of bytes actually
 * written.  The partition is left open; efx_mcdi_mtd_sync() finishes
 * the update.  Returns 0 or the first error code.
 */
int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
		       size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	/* Clamp the write to the partition size. */
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
					  buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}
1360 | ||
1361 | int efx_mcdi_mtd_sync(struct mtd_info *mtd) | |
1362 | { | |
1363 | struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); | |
1364 | struct efx_nic *efx = mtd->priv; | |
1365 | int rc = 0; | |
1366 | ||
1367 | if (part->updating) { | |
1368 | part->updating = false; | |
1369 | rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type); | |
1370 | } | |
1371 | ||
1372 | return rc; | |
1373 | } | |
1374 | ||
/* Regenerate the MTD partition's name as
 * "<interface name> <partition type>:<firmware subtype>", e.g. after
 * the network device is renamed.
 */
void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
{
	struct efx_mcdi_mtd_partition *mcdi_part =
		container_of(part, struct efx_mcdi_mtd_partition, common);
	struct efx_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
		 efx->name, part->type_name, mcdi_part->fw_subtype);
}
1384 | ||
1385 | #endif /* CONFIG_SFC_MTD */ |