Commit | Line | Data |
---|---|---|
62683ab5 GR |
1 | /******************************************************************************* |
2 | * | |
3 | * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver | |
ef8693eb | 4 | * Copyright(c) 2013 - 2014 Intel Corporation. |
62683ab5 GR |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | |
7 | * under the terms and conditions of the GNU General Public License, | |
8 | * version 2, as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope it will be useful, but WITHOUT | |
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | * more details. | |
14 | * | |
b831607d JB |
15 | * You should have received a copy of the GNU General Public License along |
16 | * with this program. If not, see <http://www.gnu.org/licenses/>. | |
17 | * | |
62683ab5 GR |
18 | * The full GNU General Public License is included in this distribution in |
19 | * the file called "COPYING". | |
20 | * | |
21 | * Contact Information: | |
22 | * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | |
23 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
24 | * | |
25 | ******************************************************************************/ | |
26 | ||
27 | #include "i40evf.h" | |
28 | #include "i40e_prototype.h" | |
29 | ||
30 | /* busy wait delay in msec */ | |
31 | #define I40EVF_BUSY_WAIT_DELAY 10 | |
32 | #define I40EVF_BUSY_WAIT_COUNT 50 | |
33 | ||
34 | /** | |
35 | * i40evf_send_pf_msg | |
36 | * @adapter: adapter structure | |
37 | * @op: virtual channel opcode | |
38 | * @msg: pointer to message buffer | |
39 | * @len: message length | |
40 | * | |
41 | * Send message to PF and print status if failure. | |
42 | **/ | |
43 | static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, | |
44 | enum i40e_virtchnl_ops op, u8 *msg, u16 len) | |
45 | { | |
46 | struct i40e_hw *hw = &adapter->hw; | |
47 | i40e_status err; | |
48 | ||
ef8693eb MW |
49 | if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) |
50 | return 0; /* nothing to see here, move along */ | |
51 | ||
62683ab5 GR |
52 | err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); |
53 | if (err) | |
54 | dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n", | |
55 | op, err, hw->aq.asq_last_status); | |
56 | return err; | |
57 | } | |
58 | ||
59 | /** | |
60 | * i40evf_send_api_ver | |
61 | * @adapter: adapter structure | |
62 | * | |
63 | * Send API version admin queue message to the PF. The reply is not checked | |
64 | * in this function. Returns 0 if the message was successfully | |
65 | * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. | |
66 | **/ | |
67 | int i40evf_send_api_ver(struct i40evf_adapter *adapter) | |
68 | { | |
69 | struct i40e_virtchnl_version_info vvi; | |
70 | ||
71 | vvi.major = I40E_VIRTCHNL_VERSION_MAJOR; | |
72 | vvi.minor = I40E_VIRTCHNL_VERSION_MINOR; | |
73 | ||
74 | return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi, | |
75 | sizeof(vvi)); | |
76 | } | |
77 | ||
/**
 * i40evf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after admin queue is
 * initialized. Returns 0 if API versions match, -EIO if they do not,
 * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
 * from the firmware are propagated.
 **/
int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum i40e_virtchnl_ops op;
	i40e_status err;

	event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	/* Drain the ARQ until the VERSION reply arrives; any earlier,
	 * unrelated messages are consumed and discarded.
	 */
	while (1) {
		err = i40evf_clean_arq_element(hw, &event, NULL);
		/* When the AQ is empty, i40evf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		if (err)
			goto out_alloc;
		op =
		    (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == I40E_VIRTCHNL_OP_VERSION)
			break;
	}


	/* The PF's return code for our request rides in cookie_low. */
	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	if (err)
		goto out_alloc;

	/* NOTE(review): unreachable after the loop above (which only exits
	 * with op == VERSION); kept as a defensive check.
	 */
	if (op != I40E_VIRTCHNL_OP_VERSION) {
		dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
			 op);
		err = -EIO;
		goto out_alloc;
	}

	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
	if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
	    (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
		err = -EIO;

out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}
137 | ||
/**
 * i40evf_send_vf_config_msg
 * @adapter: adapter structure
 *
 * Send VF configuration request admin queue message to the PF. The reply
 * is not checked in this function. Returns 0 if the message was
 * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
{
	/* No payload - the PF replies with the full resource description. */
	return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				  NULL, 0);
}
151 | ||
/**
 * i40evf_get_vf_config
 * @adapter: adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called after
 * admin queue is initialized. Busy waits until response is received from PF,
 * with maximum timeout. Response from PF is returned in the buffer for further
 * processing by the caller.
 **/
int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum i40e_virtchnl_ops op;
	i40e_status err;
	u16 len;

	/* Worst-case reply: resource header plus one VSI entry per VSI. */
	len = sizeof(struct i40e_virtchnl_vf_resource) +
		I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
	event.msg_size = len;
	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	while (1) {
		event.msg_size = len;
		/* When the AQ is empty, i40evf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		err = i40evf_clean_arq_element(hw, &event, NULL);
		if (err)
			goto out_alloc;
		op =
		    (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES)
			break;
	}

	/* NOTE(review): err (from cookie_low) is not checked before the copy
	 * below; the caller is expected to inspect the return value.
	 */
	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));

	i40e_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}
202 | ||
/**
 * i40evf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_vsi_queue_config_info *vqci;
	struct i40e_virtchnl_queue_pair_info *vqpi;
	int pairs = adapter->vsi_res->num_queue_pairs;
	int i, len;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	/* one header followed by one queue_pair_info per queue pair */
	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
		       (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
	vqci = kzalloc(len, GFP_ATOMIC);
	if (!vqci)
		return;

	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i]->count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
		/* head write-back area is placed immediately after the
		 * descriptor ring in the same DMA allocation
		 */
		vqpi->txq.headwb_enabled = 1;
		vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
		    (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));

		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
		/* largest frame we can receive, including L2 overhead */
		vqpi->rxq.max_pkt_size = adapter->netdev->mtu
					+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
		vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
		vqpi++;
	}

	adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			   (u8 *)vqci, len);
	kfree(vqci);
}
260 | ||
261 | /** | |
262 | * i40evf_enable_queues | |
263 | * @adapter: adapter structure | |
264 | * | |
265 | * Request that the PF enable all of our queues. | |
266 | **/ | |
267 | void i40evf_enable_queues(struct i40evf_adapter *adapter) | |
268 | { | |
269 | struct i40e_virtchnl_queue_select vqs; | |
270 | ||
271 | if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { | |
272 | /* bail because we already have a command pending */ | |
273 | dev_err(&adapter->pdev->dev, "%s: command %d pending\n", | |
274 | __func__, adapter->current_op); | |
275 | return; | |
276 | } | |
277 | adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; | |
278 | vqs.vsi_id = adapter->vsi_res->vsi_id; | |
279 | vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1; | |
280 | vqs.rx_queues = vqs.tx_queues; | |
62683ab5 GR |
281 | adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES; |
282 | adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; | |
fc86a970 MW |
283 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, |
284 | (u8 *)&vqs, sizeof(vqs)); | |
62683ab5 GR |
285 | } |
286 | ||
287 | /** | |
288 | * i40evf_disable_queues | |
289 | * @adapter: adapter structure | |
290 | * | |
291 | * Request that the PF disable all of our queues. | |
292 | **/ | |
293 | void i40evf_disable_queues(struct i40evf_adapter *adapter) | |
294 | { | |
295 | struct i40e_virtchnl_queue_select vqs; | |
296 | ||
297 | if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { | |
298 | /* bail because we already have a command pending */ | |
299 | dev_err(&adapter->pdev->dev, "%s: command %d pending\n", | |
300 | __func__, adapter->current_op); | |
301 | return; | |
302 | } | |
303 | adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; | |
304 | vqs.vsi_id = adapter->vsi_res->vsi_id; | |
305 | vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1; | |
306 | vqs.rx_queues = vqs.tx_queues; | |
62683ab5 GR |
307 | adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES; |
308 | adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; | |
fc86a970 MW |
309 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, |
310 | (u8 *)&vqs, sizeof(vqs)); | |
62683ab5 GR |
311 | } |
312 | ||
/**
 * i40evf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void i40evf_map_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_irq_map_info *vimi;
	int v_idx, q_vectors, len;
	struct i40e_q_vector *q_vector;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;

	/* all vectors except the reserved non-queue (misc/AQ) ones */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	/* header plus one vector_map per MSI-X vector (queues + misc) */
	len = sizeof(struct i40e_virtchnl_irq_map_info) +
	      (adapter->num_msix_vectors *
		sizeof(struct i40e_virtchnl_vector_map));
	vimi = kzalloc(len, GFP_ATOMIC);
	if (!vimi)
		return;

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
		vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
		vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
		vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
	}
	/* Misc vector last - this is only for AdminQ messages */
	vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
	vimi->vecmap[v_idx].vector_id = 0;
	vimi->vecmap[v_idx].txq_map = 0;
	vimi->vecmap[v_idx].rxq_map = 0;

	adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
			   (u8 *)vimi, len);
	kfree(vimi);
}
364 | ||
365 | /** | |
366 | * i40evf_add_ether_addrs | |
367 | * @adapter: adapter structure | |
368 | * @addrs: the MAC address filters to add (contiguous) | |
369 | * @count: number of filters | |
370 | * | |
371 | * Request that the PF add one or more addresses to our filters. | |
372 | **/ | |
373 | void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) | |
374 | { | |
375 | struct i40e_virtchnl_ether_addr_list *veal; | |
376 | int len, i = 0, count = 0; | |
377 | struct i40evf_mac_filter *f; | |
378 | ||
379 | if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { | |
380 | /* bail because we already have a command pending */ | |
381 | dev_err(&adapter->pdev->dev, "%s: command %d pending\n", | |
382 | __func__, adapter->current_op); | |
383 | return; | |
384 | } | |
385 | list_for_each_entry(f, &adapter->mac_filter_list, list) { | |
386 | if (f->add) | |
387 | count++; | |
388 | } | |
389 | if (!count) { | |
390 | adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; | |
391 | return; | |
392 | } | |
393 | adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS; | |
394 | ||
395 | len = sizeof(struct i40e_virtchnl_ether_addr_list) + | |
396 | (count * sizeof(struct i40e_virtchnl_ether_addr)); | |
397 | if (len > I40EVF_MAX_AQ_BUF_SIZE) { | |
80e72893 | 398 | dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", |
62683ab5 GR |
399 | __func__); |
400 | count = (I40EVF_MAX_AQ_BUF_SIZE - | |
401 | sizeof(struct i40e_virtchnl_ether_addr_list)) / | |
402 | sizeof(struct i40e_virtchnl_ether_addr); | |
403 | len = I40EVF_MAX_AQ_BUF_SIZE; | |
404 | } | |
405 | ||
406 | veal = kzalloc(len, GFP_ATOMIC); | |
249c8b8d | 407 | if (!veal) |
62683ab5 | 408 | return; |
249c8b8d | 409 | |
62683ab5 GR |
410 | veal->vsi_id = adapter->vsi_res->vsi_id; |
411 | veal->num_elements = count; | |
412 | list_for_each_entry(f, &adapter->mac_filter_list, list) { | |
413 | if (f->add) { | |
9a173901 | 414 | ether_addr_copy(veal->list[i].addr, f->macaddr); |
62683ab5 GR |
415 | i++; |
416 | f->add = false; | |
417 | } | |
418 | } | |
fc86a970 MW |
419 | adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; |
420 | adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; | |
62683ab5 GR |
421 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, |
422 | (u8 *)veal, len); | |
423 | kfree(veal); | |
62683ab5 GR |
424 | } |
425 | ||
426 | /** | |
427 | * i40evf_del_ether_addrs | |
428 | * @adapter: adapter structure | |
429 | * @addrs: the MAC address filters to remove (contiguous) | |
430 | * @count: number of filtes | |
431 | * | |
432 | * Request that the PF remove one or more addresses from our filters. | |
433 | **/ | |
434 | void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) | |
435 | { | |
436 | struct i40e_virtchnl_ether_addr_list *veal; | |
437 | struct i40evf_mac_filter *f, *ftmp; | |
438 | int len, i = 0, count = 0; | |
439 | ||
440 | if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { | |
441 | /* bail because we already have a command pending */ | |
442 | dev_err(&adapter->pdev->dev, "%s: command %d pending\n", | |
443 | __func__, adapter->current_op); | |
444 | return; | |
445 | } | |
446 | list_for_each_entry(f, &adapter->mac_filter_list, list) { | |
447 | if (f->remove) | |
448 | count++; | |
449 | } | |
450 | if (!count) { | |
451 | adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; | |
452 | return; | |
453 | } | |
454 | adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; | |
455 | ||
456 | len = sizeof(struct i40e_virtchnl_ether_addr_list) + | |
457 | (count * sizeof(struct i40e_virtchnl_ether_addr)); | |
458 | if (len > I40EVF_MAX_AQ_BUF_SIZE) { | |
80e72893 | 459 | dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", |
62683ab5 GR |
460 | __func__); |
461 | count = (I40EVF_MAX_AQ_BUF_SIZE - | |
462 | sizeof(struct i40e_virtchnl_ether_addr_list)) / | |
463 | sizeof(struct i40e_virtchnl_ether_addr); | |
464 | len = I40EVF_MAX_AQ_BUF_SIZE; | |
465 | } | |
466 | veal = kzalloc(len, GFP_ATOMIC); | |
249c8b8d | 467 | if (!veal) |
62683ab5 | 468 | return; |
249c8b8d | 469 | |
62683ab5 GR |
470 | veal->vsi_id = adapter->vsi_res->vsi_id; |
471 | veal->num_elements = count; | |
472 | list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { | |
473 | if (f->remove) { | |
9a173901 | 474 | ether_addr_copy(veal->list[i].addr, f->macaddr); |
62683ab5 GR |
475 | i++; |
476 | list_del(&f->list); | |
477 | kfree(f); | |
478 | } | |
479 | } | |
fc86a970 MW |
480 | adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; |
481 | adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; | |
62683ab5 GR |
482 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, |
483 | (u8 *)veal, len); | |
484 | kfree(veal); | |
62683ab5 GR |
485 | } |
486 | ||
487 | /** | |
488 | * i40evf_add_vlans | |
489 | * @adapter: adapter structure | |
490 | * @vlans: the VLANs to add | |
491 | * @count: number of VLANs | |
492 | * | |
493 | * Request that the PF add one or more VLAN filters to our VSI. | |
494 | **/ | |
495 | void i40evf_add_vlans(struct i40evf_adapter *adapter) | |
496 | { | |
497 | struct i40e_virtchnl_vlan_filter_list *vvfl; | |
498 | int len, i = 0, count = 0; | |
499 | struct i40evf_vlan_filter *f; | |
500 | ||
501 | if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { | |
502 | /* bail because we already have a command pending */ | |
503 | dev_err(&adapter->pdev->dev, "%s: command %d pending\n", | |
504 | __func__, adapter->current_op); | |
505 | return; | |
506 | } | |
507 | ||
508 | list_for_each_entry(f, &adapter->vlan_filter_list, list) { | |
509 | if (f->add) | |
510 | count++; | |
511 | } | |
512 | if (!count) { | |
513 | adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; | |
514 | return; | |
515 | } | |
516 | adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN; | |
517 | ||
518 | len = sizeof(struct i40e_virtchnl_vlan_filter_list) + | |
519 | (count * sizeof(u16)); | |
520 | if (len > I40EVF_MAX_AQ_BUF_SIZE) { | |
80e72893 | 521 | dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", |
62683ab5 GR |
522 | __func__); |
523 | count = (I40EVF_MAX_AQ_BUF_SIZE - | |
524 | sizeof(struct i40e_virtchnl_vlan_filter_list)) / | |
525 | sizeof(u16); | |
526 | len = I40EVF_MAX_AQ_BUF_SIZE; | |
527 | } | |
528 | vvfl = kzalloc(len, GFP_ATOMIC); | |
249c8b8d | 529 | if (!vvfl) |
62683ab5 | 530 | return; |
249c8b8d | 531 | |
62683ab5 GR |
532 | vvfl->vsi_id = adapter->vsi_res->vsi_id; |
533 | vvfl->num_elements = count; | |
534 | list_for_each_entry(f, &adapter->vlan_filter_list, list) { | |
535 | if (f->add) { | |
536 | vvfl->vlan_id[i] = f->vlan; | |
537 | i++; | |
538 | f->add = false; | |
539 | } | |
540 | } | |
62683ab5 GR |
541 | adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; |
542 | adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; | |
fc86a970 MW |
543 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); |
544 | kfree(vvfl); | |
62683ab5 GR |
545 | } |
546 | ||
547 | /** | |
548 | * i40evf_del_vlans | |
549 | * @adapter: adapter structure | |
550 | * @vlans: the VLANs to remove | |
551 | * @count: number of VLANs | |
552 | * | |
553 | * Request that the PF remove one or more VLAN filters from our VSI. | |
554 | **/ | |
555 | void i40evf_del_vlans(struct i40evf_adapter *adapter) | |
556 | { | |
557 | struct i40e_virtchnl_vlan_filter_list *vvfl; | |
558 | struct i40evf_vlan_filter *f, *ftmp; | |
559 | int len, i = 0, count = 0; | |
560 | ||
561 | if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { | |
562 | /* bail because we already have a command pending */ | |
563 | dev_err(&adapter->pdev->dev, "%s: command %d pending\n", | |
564 | __func__, adapter->current_op); | |
565 | return; | |
566 | } | |
567 | ||
568 | list_for_each_entry(f, &adapter->vlan_filter_list, list) { | |
569 | if (f->remove) | |
570 | count++; | |
571 | } | |
572 | if (!count) { | |
573 | adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; | |
574 | return; | |
575 | } | |
576 | adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN; | |
577 | ||
578 | len = sizeof(struct i40e_virtchnl_vlan_filter_list) + | |
579 | (count * sizeof(u16)); | |
580 | if (len > I40EVF_MAX_AQ_BUF_SIZE) { | |
80e72893 | 581 | dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", |
62683ab5 GR |
582 | __func__); |
583 | count = (I40EVF_MAX_AQ_BUF_SIZE - | |
584 | sizeof(struct i40e_virtchnl_vlan_filter_list)) / | |
585 | sizeof(u16); | |
586 | len = I40EVF_MAX_AQ_BUF_SIZE; | |
587 | } | |
588 | vvfl = kzalloc(len, GFP_ATOMIC); | |
249c8b8d | 589 | if (!vvfl) |
62683ab5 | 590 | return; |
249c8b8d | 591 | |
62683ab5 GR |
592 | vvfl->vsi_id = adapter->vsi_res->vsi_id; |
593 | vvfl->num_elements = count; | |
594 | list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { | |
595 | if (f->remove) { | |
596 | vvfl->vlan_id[i] = f->vlan; | |
597 | i++; | |
598 | list_del(&f->list); | |
599 | kfree(f); | |
600 | } | |
601 | } | |
62683ab5 GR |
602 | adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; |
603 | adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; | |
fc86a970 MW |
604 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); |
605 | kfree(vvfl); | |
62683ab5 GR |
606 | } |
607 | ||
608 | /** | |
609 | * i40evf_set_promiscuous | |
610 | * @adapter: adapter structure | |
611 | * @flags: bitmask to control unicast/multicast promiscuous. | |
612 | * | |
613 | * Request that the PF enable promiscuous mode for our VSI. | |
614 | **/ | |
615 | void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) | |
616 | { | |
617 | struct i40e_virtchnl_promisc_info vpi; | |
618 | ||
619 | if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { | |
620 | /* bail because we already have a command pending */ | |
621 | dev_err(&adapter->pdev->dev, "%s: command %d pending\n", | |
622 | __func__, adapter->current_op); | |
623 | return; | |
624 | } | |
625 | adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; | |
626 | vpi.vsi_id = adapter->vsi_res->vsi_id; | |
627 | vpi.flags = flags; | |
628 | i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, | |
629 | (u8 *)&vpi, sizeof(vpi)); | |
630 | } | |
631 | ||
632 | /** | |
633 | * i40evf_request_stats | |
634 | * @adapter: adapter structure | |
635 | * | |
636 | * Request VSI statistics from PF. | |
637 | **/ | |
638 | void i40evf_request_stats(struct i40evf_adapter *adapter) | |
639 | { | |
640 | struct i40e_virtchnl_queue_select vqs; | |
641 | if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { | |
642 | /* no error message, this isn't crucial */ | |
643 | return; | |
644 | } | |
645 | adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS; | |
646 | vqs.vsi_id = adapter->vsi_res->vsi_id; | |
647 | /* queue maps are ignored for this message - only the vsi is used */ | |
648 | if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS, | |
649 | (u8 *)&vqs, sizeof(vqs))) | |
650 | /* if the request failed, don't lock out others */ | |
651 | adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; | |
652 | } | |
/**
 * i40evf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 **/
void i40evf_request_reset(struct i40evf_adapter *adapter)
{
	/* Don't check CURRENT_OP - this is always higher priority */
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
	/* no reply will arrive, so clear current_op immediately rather than
	 * waiting for a completion that never comes
	 */
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
62683ab5 GR |
665 | |
/**
 * i40evf_virtchnl_completion
 * @adapter: adapter structure
 * @v_opcode: opcode sent by PF
 * @v_retval: retval sent by PF
 * @msg: message sent by PF
 * @msglen: message length
 *
 * Asynchronous completion function for admin queue messages. Rather than busy
 * wait, we fire off our requests and assume that no errors will be returned.
 * This function handles the reply messages.
 **/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
				enum i40e_virtchnl_ops v_opcode,
				i40e_status v_retval,
				u8 *msg, u16 msglen)
{
	struct net_device *netdev = adapter->netdev;

	if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
		/* Unsolicited event from the PF, not a reply to one of our
		 * requests - handle it and return without touching current_op.
		 */
		struct i40e_virtchnl_pf_event *vpe =
			(struct i40e_virtchnl_pf_event *)msg;
		switch (vpe->event) {
		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
			adapter->link_up =
				vpe->event_data.link_event.link_status;
			if (adapter->link_up && !netif_carrier_ok(netdev)) {
				dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
				netif_carrier_on(netdev);
				netif_tx_wake_all_queues(netdev);
			} else if (!adapter->link_up) {
				dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
				netif_carrier_off(netdev);
				netif_tx_stop_all_queues(netdev);
			}
			break;
		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
			dev_info(&adapter->pdev->dev, "PF reset warning received\n");
			/* only schedule one reset, even if the PF warns us
			 * repeatedly
			 */
			if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
				adapter->flags |= I40EVF_FLAG_RESET_PENDING;
				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
				schedule_work(&adapter->reset_task);
			}
			break;
		default:
			dev_err(&adapter->pdev->dev,
				"%s: Unknown event %d from pf\n",
				__func__, vpe->event);
			break;

		}
		return;
	}
	if (v_opcode != adapter->current_op) {
		dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n",
			__func__, adapter->current_op, v_opcode);
		/* We're probably completely screwed at this point, but clear
		 * the current op and try to carry on....
		 */
		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		return;
	}
	if (v_retval) {
		/* log the PF's error but still clear the matching aq_pending
		 * bit below so the state machine isn't wedged
		 */
		dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
			__func__, v_retval, v_opcode);
	}
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_GET_STATS: {
		/* fold the PF-reported counters into netdev stats */
		struct i40e_eth_stats *stats =
			(struct i40e_eth_stats *)msg;
		adapter->net_stats.rx_packets = stats->rx_unicast +
						 stats->rx_multicast +
						 stats->rx_broadcast;
		adapter->net_stats.tx_packets = stats->tx_unicast +
						 stats->tx_multicast +
						 stats->tx_broadcast;
		adapter->net_stats.rx_bytes = stats->rx_bytes;
		adapter->net_stats.tx_bytes = stats->tx_bytes;
		adapter->net_stats.tx_errors = stats->tx_errors;
		adapter->net_stats.rx_dropped = stats->rx_discards;
		adapter->net_stats.tx_dropped = stats->tx_discards;
		adapter->current_stats = *stats;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES);
		/* enable transmits */
		i40evf_irq_enable(adapter, true);
		netif_tx_start_all_queues(adapter->netdev);
		netif_carrier_on(adapter->netdev);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
			 __func__, v_opcode);
		break;
	} /* switch v_opcode */
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}