/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/busy_poll.h>
#include "en.h"
#include "en_tc.h"

static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
	return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
}

static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
				       void *data)
{
	u32 ci = cqcc & cq->wq.sz_m1;

	memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
}

static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
					 struct mlx5e_cq *cq, u32 cqcc)
{
	mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
	cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt);
	cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
	rq->stats.cqe_compress_blks++;
}

static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
{
	mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
	cq->mini_arr_idx = 0;
}

static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
{
	u8 op_own = (cqcc >> cq->wq.log_sz) & 1;
	u32 wq_sz = 1 << cq->wq.log_sz;
	u32 ci = cqcc & cq->wq.sz_m1;
	u32 ci_top = min_t(u32, wq_sz, ci + n);

	for (; ci < ci_top; ci++, n--) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);

		cqe->op_own = op_own;
	}

	if (unlikely(ci == wq_sz)) {
		op_own = !op_own;
		for (ci = 0; ci < n; ci++) {
			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);

			cqe->op_own = op_own;
		}
	}
}

static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
					struct mlx5e_cq *cq, u32 cqcc)
{
	u16 wqe_cnt_step;

	cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
	cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
	cq->title.op_own &= 0xf0;
	cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz);
	cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);

	wqe_cnt_step =
		rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
	cq->decmprs_wqe_counter =
		(cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
}

static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
						struct mlx5e_cq *cq, u32 cqcc)
{
	mlx5e_decompress_cqe(rq, cq, cqcc);
	cq->title.rss_hash_type = 0;
	cq->title.rss_hash_result = 0;
}

static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
					     struct mlx5e_cq *cq,
					     int update_owner_only,
					     int budget_rem)
{
	u32 cqcc = cq->wq.cc + update_owner_only;
	u32 cqe_count;
	u32 i;

	cqe_count = min_t(u32, cq->decmprs_left, budget_rem);

	for (i = update_owner_only; i < cqe_count;
	     i++, cq->mini_arr_idx++, cqcc++) {
		if (unlikely(cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE))
			mlx5e_read_mini_arr_slot(cq, cqcc);

		mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
		rq->handle_rx_cqe(rq, &cq->title);
	}
	mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
	cq->wq.cc = cqcc;
	cq->decmprs_left -= cqe_count;
	rq->stats.cqe_compress_pkts += cqe_count;

	return cqe_count;
}

static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
					      struct mlx5e_cq *cq,
					      int budget_rem)
{
	mlx5e_read_title_slot(rq, cq, cq->wq.cc);
	mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
	mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
	rq->handle_rx_cqe(rq, &cq->title);
	cq->mini_arr_idx++;

	return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
}

void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
{
	bool was_opened;

	if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
		return;

	mutex_lock(&priv->state_lock);

	if (priv->params.rx_cqe_compress == val)
		goto unlock;

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(priv->netdev);

	priv->params.rx_cqe_compress = val;

	if (was_opened)
		mlx5e_open_locked(priv->netdev);

unlock:
	mutex_unlock(&priv->state_lock);
}

int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz);
	if (unlikely(!skb))
		return -ENOMEM;

	dma_addr = dma_map_single(rq->pdev,
				  /* hw start padding */
				  skb->data,
				  /* hw end padding */
				  rq->wqe_sz,
				  DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
		goto err_free_skb;

	*((dma_addr_t *)skb->cb) = dma_addr;
	wqe->data.addr = cpu_to_be64(dma_addr);
	wqe->data.lkey = rq->mkey_be;

	rq->skb[ix] = skb;

	return 0;

err_free_skb:
	dev_kfree_skb(skb);

	return -ENOMEM;
}

static inline void
mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev,
				struct mlx5e_mpw_info *wi,
				u32 wqe_offset, u32 len)
{
	dma_sync_single_for_cpu(pdev, wi->dma_info.addr + wqe_offset,
				len, DMA_FROM_DEVICE);
}

static inline void
mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev,
				    struct mlx5e_mpw_info *wi,
				    u32 wqe_offset, u32 len)
{
	/* No dma pre sync for fragmented MPWQE */
}

static inline void
mlx5e_add_skb_frag_linear_mpwqe(struct device *pdev,
				struct sk_buff *skb,
				struct mlx5e_mpw_info *wi,
				u32 page_idx, u32 frag_offset,
				u32 len)
{
	unsigned int truesize = ALIGN(len, MLX5_MPWRQ_STRIDE_SIZE);

	wi->skbs_frags[page_idx]++;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
			&wi->dma_info.page[page_idx], frag_offset,
			len, truesize);
}

static inline void
mlx5e_add_skb_frag_fragmented_mpwqe(struct device *pdev,
				    struct sk_buff *skb,
				    struct mlx5e_mpw_info *wi,
				    u32 page_idx, u32 frag_offset,
				    u32 len)
{
	unsigned int truesize = ALIGN(len, MLX5_MPWRQ_STRIDE_SIZE);

	dma_sync_single_for_cpu(pdev,
				wi->umr.dma_info[page_idx].addr + frag_offset,
				len, DMA_FROM_DEVICE);
	wi->skbs_frags[page_idx]++;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
			wi->umr.dma_info[page_idx].page, frag_offset,
			len, truesize);
}

static inline void
mlx5e_copy_skb_header_linear_mpwqe(struct device *pdev,
				   struct sk_buff *skb,
				   struct mlx5e_mpw_info *wi,
				   u32 page_idx, u32 offset,
				   u32 headlen)
{
	struct page *page = &wi->dma_info.page[page_idx];

	skb_copy_to_linear_data(skb, page_address(page) + offset,
				ALIGN(headlen, sizeof(long)));
}

static inline void
mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
				       struct sk_buff *skb,
				       struct mlx5e_mpw_info *wi,
				       u32 page_idx, u32 offset,
				       u32 headlen)
{
	u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
	unsigned int len;

	/* Aligning len to sizeof(long) optimizes memcpy performance */
	len = ALIGN(headlen_pg, sizeof(long));
	dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
				DMA_FROM_DEVICE);
	skb_copy_to_linear_data_offset(skb, 0,
				       page_address(dma_info->page) + offset,
				       len);
#if (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD >= MLX5_MPWRQ_STRIDE_SIZE)
	if (unlikely(offset + headlen > PAGE_SIZE)) {
		dma_info++;
		headlen_pg = len;
		len = ALIGN(headlen - headlen_pg, sizeof(long));
		dma_sync_single_for_cpu(pdev, dma_info->addr, len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data_offset(skb, headlen_pg,
					       page_address(dma_info->page),
					       len);
	}
#endif
}

static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
{
	return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
		wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}

static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				struct mlx5e_sq *sq,
				struct mlx5e_umr_wqe *wqe,
				u16 ix)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	struct mlx5_wqe_data_seg *dseg = &wqe->data;
	struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);

	memset(wqe, 0, sizeof(*wqe));
	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
			    MLX5_OPCODE_UMR);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				   ds_cnt);
	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm = rq->umr_mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
	ucseg->klm_octowords =
		cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->bsf_octowords =
		cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
	ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);

	dseg->lkey = sq->mkey_be;
	dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}

static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
{
	struct mlx5e_sq *sq = &rq->channel->icosq;
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_umr_wqe *wqe;
	u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
	u16 pi;

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
		sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
		sq->ico_wqe_info[pi].num_wqebbs = 1;
		mlx5e_send_nop(sq, true);
	}

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	mlx5e_build_umr_wqe(rq, sq, wqe, ix);
	sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR;
	sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs;
	sq->pc += num_wqebbs;
	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}

static inline int mlx5e_get_wqe_mtt_sz(void)
{
	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the mtt array, we allocate
	 * a little more.
	 */
	return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
		     MLX5_UMR_MTT_ALIGNMENT);
}

static int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq,
				    struct mlx5e_mpw_info *wi,
				    int i)
{
	struct page *page;

	page = dev_alloc_page();
	if (unlikely(!page))
		return -ENOMEM;

	wi->umr.dma_info[i].page = page;
	wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) {
		put_page(page);
		return -ENOMEM;
	}
	wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR);

	return 0;
}

static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
					   struct mlx5e_rx_wqe *wqe,
					   u16 ix)
{
	struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
	int i;

	wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
				   MLX5_MPWRQ_PAGES_PER_WQE,
				   GFP_ATOMIC);
	if (unlikely(!wi->umr.dma_info))
		goto err_out;

	/* We allocate more than mtt_sz as we will align the pointer */
	wi->umr.mtt_no_align = kzalloc(mtt_sz + MLX5_UMR_ALIGN - 1,
				       GFP_ATOMIC);
	if (unlikely(!wi->umr.mtt_no_align))
		goto err_free_umr;

	wi->umr.mtt = PTR_ALIGN(wi->umr.mtt_no_align, MLX5_UMR_ALIGN);
	wi->umr.mtt_addr = dma_map_single(rq->pdev, wi->umr.mtt, mtt_sz,
					  PCI_DMA_TODEVICE);
	if (unlikely(dma_mapping_error(rq->pdev, wi->umr.mtt_addr)))
		goto err_free_mtt;

	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
		if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
			goto err_unmap;
		atomic_add(MLX5_MPWRQ_STRIDES_PER_PAGE,
			   &wi->umr.dma_info[i].page->_count);
		wi->skbs_frags[i] = 0;
	}

	wi->consumed_strides = 0;
	wi->dma_pre_sync = mlx5e_dma_pre_sync_fragmented_mpwqe;
	wi->add_skb_frag = mlx5e_add_skb_frag_fragmented_mpwqe;
	wi->copy_skb_header = mlx5e_copy_skb_header_fragmented_mpwqe;
	wi->free_wqe = mlx5e_free_rx_fragmented_mpwqe;
	wqe->data.lkey = rq->umr_mkey_be;
	wqe->data.addr = cpu_to_be64(dma_offset);

	return 0;

err_unmap:
	while (--i >= 0) {
		dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE,
			   &wi->umr.dma_info[i].page->_count);
		put_page(wi->umr.dma_info[i].page);
	}
	dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);

err_free_mtt:
	kfree(wi->umr.mtt_no_align);

err_free_umr:
	kfree(wi->umr.dma_info);

err_out:
	return -ENOMEM;
}

void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
				    struct mlx5e_mpw_info *wi)
{
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int i;

	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
		dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i],
			   &wi->umr.dma_info[i].page->_count);
		put_page(wi->umr.dma_info[i].page);
	}
	dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
	kfree(wi->umr.mtt_no_align);
	kfree(wi->umr.dma_info);
}

void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;
	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

	clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
	rq->stats.mpwqe_frag++;

	/* ensure wqes are visible to device before updating doorbell record */
	dma_wmb();

	mlx5_wq_ll_update_db_record(wq);
}

static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
				       struct mlx5e_rx_wqe *wqe,
				       u16 ix)
{
	struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
	gfp_t gfp_mask;
	int i;

	gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
	wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
					     MLX5_MPWRQ_WQE_PAGE_ORDER);
	if (unlikely(!wi->dma_info.page))
		return -ENOMEM;

	wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
					 rq->wqe_sz, PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
		put_page(wi->dma_info.page);
		return -ENOMEM;
	}

	/* We split the high-order page into order-0 ones and manage their
	 * reference counter to minimize the memory held by small skb fragments
	 */
	split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
		atomic_add(MLX5_MPWRQ_STRIDES_PER_PAGE,
			   &wi->dma_info.page[i]._count);
		wi->skbs_frags[i] = 0;
	}

	wi->consumed_strides = 0;
	wi->dma_pre_sync = mlx5e_dma_pre_sync_linear_mpwqe;
	wi->add_skb_frag = mlx5e_add_skb_frag_linear_mpwqe;
	wi->copy_skb_header = mlx5e_copy_skb_header_linear_mpwqe;
	wi->free_wqe = mlx5e_free_rx_linear_mpwqe;
	wqe->data.lkey = rq->mkey_be;
	wqe->data.addr = cpu_to_be64(wi->dma_info.addr);

	return 0;
}

void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
				struct mlx5e_mpw_info *wi)
{
	int i;

	dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
		       PCI_DMA_FROMDEVICE);
	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
		atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i],
			   &wi->dma_info.page[i]._count);
		put_page(&wi->dma_info.page[i]);
	}
}

int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
	int err;

	err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix);
	if (unlikely(err)) {
		err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix);
		if (unlikely(err))
			return err;
		set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
		mlx5e_post_umr_wqe(rq, ix);
		return -EBUSY;
	}

	return 0;
}

#define RQ_CANNOT_POST(rq) \
	(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
	 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))

bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;

	if (unlikely(RQ_CANNOT_POST(rq)))
		return false;

	while (!mlx5_wq_ll_is_full(wq)) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
		int err;

		err = rq->alloc_wqe(rq, wqe, wq->head);
		if (unlikely(err)) {
			if (err != -EBUSY)
				rq->stats.buff_alloc_err++;
			break;
		}

		mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
	}

	/* ensure wqes are visible to device before updating doorbell record */
	dma_wmb();

	mlx5_wq_ll_update_db_record(wq);

	return !mlx5_wq_ll_is_full(wq);
}

static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
				 u32 cqe_bcnt)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
	struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
	struct tcphdr *tcp;

	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
	int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
		       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

	u16 tot_len = cqe_bcnt - ETH_HLEN;

	if (eth->h_proto == htons(ETH_P_IP)) {
		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
					sizeof(struct iphdr));
		ipv6 = NULL;
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	} else {
		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
					sizeof(struct ipv6hdr));
		ipv4 = NULL;
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	}

	if (get_cqe_lro_tcppsh(cqe))
		tcp->psh = 1;

	if (tcp_ack) {
		tcp->ack = 1;
		tcp->ack_seq = cqe->lro_ack_seq_num;
		tcp->window = cqe->lro_tcp_win;
	}

	if (ipv4) {
		ipv4->ttl = cqe->lro_min_ttl;
		ipv4->tot_len = cpu_to_be16(tot_len);
		ipv4->check = 0;
		ipv4->check = ip_fast_csum((unsigned char *)ipv4,
					   ipv4->ihl);
	} else {
		ipv6->hop_limit = cqe->lro_min_ttl;
		ipv6->payload_len = cpu_to_be16(tot_len -
						sizeof(struct ipv6hdr));
	}
}

static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
				      struct sk_buff *skb)
{
	u8 cht = cqe->rss_hash_type;
	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
					    PKT_HASH_TYPE_NONE;
	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}

static inline bool is_first_ethertype_ip(struct sk_buff *skb)
{
	__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;

	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
}

static inline void mlx5e_handle_csum(struct net_device *netdev,
				     struct mlx5_cqe64 *cqe,
				     struct mlx5e_rq *rq,
				     struct sk_buff *skb,
				     bool lro)
{
	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		goto csum_none;

	if (lro) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	if (is_first_ethertype_ip(skb)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
		rq->stats.csum_sw++;
		return;
	}

	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
		   (cqe->hds_ip_ext & CQE_L4_OK))) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		if (cqe_is_tunneled(cqe)) {
			skb->csum_level = 1;
			skb->encapsulation = 1;
			rq->stats.csum_inner++;
		}
		return;
	}
csum_none:
	skb->ip_summed = CHECKSUM_NONE;
	rq->stats.csum_none++;
}

static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
				      u32 cqe_bcnt,
				      struct mlx5e_rq *rq,
				      struct sk_buff *skb)
{
	struct net_device *netdev = rq->netdev;
	struct mlx5e_tstamp *tstamp = rq->tstamp;
	int lro_num_seg;

	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
	if (lro_num_seg > 1) {
		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
		rq->stats.lro_packets++;
		rq->stats.lro_bytes += cqe_bcnt;
	}

	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
		mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));

	skb_record_rx_queue(skb, rq->ix);

	if (likely(netdev->features & NETIF_F_RXHASH))
		mlx5e_skb_set_hash(cqe, skb);

	if (cqe_has_vlan(cqe))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(cqe->vlan_info));

	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;

	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
	skb->protocol = eth_type_trans(skb, netdev);
}

static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
					 struct mlx5_cqe64 *cqe,
					 u32 cqe_bcnt,
					 struct sk_buff *skb)
{
	rq->stats.packets++;
	rq->stats.bytes += cqe_bcnt;
	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
	napi_gro_receive(rq->cq.napi, skb);
}

void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5e_rx_wqe *wqe;
	struct sk_buff *skb;
	__be16 wqe_counter_be;
	u16 wqe_counter;
	u32 cqe_bcnt;

	wqe_counter_be = cqe->wqe_counter;
	wqe_counter = be16_to_cpu(wqe_counter_be);
	wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
	skb = rq->skb[wqe_counter];
	prefetch(skb->data);
	rq->skb[wqe_counter] = NULL;

	dma_unmap_single(rq->pdev,
			 *((dma_addr_t *)skb->cb),
			 rq->wqe_sz,
			 DMA_FROM_DEVICE);

	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
		rq->stats.wqe_err++;
		dev_kfree_skb(skb);
		goto wq_ll_pop;
	}

	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
	skb_put(skb, cqe_bcnt);

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

wq_ll_pop:
	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
		       &wqe->next.next_wqe_index);
}

static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
					   struct mlx5_cqe64 *cqe,
					   struct mlx5e_mpw_info *wi,
					   u32 cqe_bcnt,
					   struct sk_buff *skb)
{
	u32 consumed_bytes = ALIGN(cqe_bcnt, MLX5_MPWRQ_STRIDE_SIZE);
	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
	u32 wqe_offset = stride_ix * MLX5_MPWRQ_STRIDE_SIZE;
	u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
	u32 page_idx = wqe_offset >> PAGE_SHIFT;
	u32 head_page_idx = page_idx;
	u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
	u32 frag_offset = head_offset + headlen;
	u16 byte_cnt = cqe_bcnt - headlen;

#if (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD >= MLX5_MPWRQ_STRIDE_SIZE)
	if (unlikely(frag_offset >= PAGE_SIZE)) {
		page_idx++;
		frag_offset -= PAGE_SIZE;
	}
#endif
	wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes);

	while (byte_cnt) {
		u32 pg_consumed_bytes =
			min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);

		wi->add_skb_frag(rq->pdev, skb, wi, page_idx, frag_offset,
				 pg_consumed_bytes);
		byte_cnt -= pg_consumed_bytes;
		frag_offset = 0;
		page_idx++;
	}
	/* copy header */
	wi->copy_skb_header(rq->pdev, skb, wi, head_page_idx, head_offset,
			    headlen);
	/* skb linear part was allocated with headlen and aligned to long */
	skb->tail += headlen;
	skb->len += headlen;
}

void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
	struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
	struct sk_buff *skb;
	u16 cqe_bcnt;

	wi->consumed_strides += cstrides;

	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
		rq->stats.wqe_err++;
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		rq->stats.mpwqe_filler++;
		goto mpwrq_cqe_out;
	}

	skb = napi_alloc_skb(rq->cq.napi,
			     ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
				   sizeof(long)));
	if (unlikely(!skb)) {
		rq->stats.buff_alloc_err++;
		goto mpwrq_cqe_out;
	}

	prefetch(skb->data);
	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);

	mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

mpwrq_cqe_out:
	if (likely(wi->consumed_strides < MLX5_MPWRQ_NUM_STRIDES))
		return;

	wi->free_wqe(rq, wi);
	mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
	int work_done = 0;

	if (cq->decmprs_left)
		work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);

	for (; work_done < budget; work_done++) {
		struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);

		if (!cqe)
			break;

		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
			work_done +=
				mlx5e_decompress_cqes_start(rq, cq,
							    budget - work_done);
			continue;
		}

		mlx5_cqwq_pop(&cq->wq);

		rq->handle_rx_cqe(rq, cqe);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done;
}