/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/busy_poll.h>
#include "en.h"
#include "en_tc.h"

static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
        return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
}

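/* Regular (non-striding) RQ post path: back each RX WQE with a freshly
 * allocated skb. The DMA address is stashed in skb->cb so the completion
 * handler can unmap it later.
 */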
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
        struct sk_buff *skb;
        dma_addr_t dma_addr;

        skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz);
        if (unlikely(!skb))
                return -ENOMEM;

        dma_addr = dma_map_single(rq->pdev,
                                  /* hw start padding */
                                  skb->data,
                                  /* hw end padding */
                                  rq->wqe_sz,
                                  DMA_FROM_DEVICE);

        if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
                goto err_free_skb;

        *((dma_addr_t *)skb->cb) = dma_addr;
        wqe->data.addr = cpu_to_be64(dma_addr);
        wqe->data.lkey = rq->mkey_be;

        rq->skb[ix] = skb;

        return 0;

err_free_skb:
        dev_kfree_skb(skb);

        return -ENOMEM;
}

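/* Multi-packet WQE (striding RQ) helpers. A WQE is backed either by a
 * "linear" physically contiguous high-order allocation mapped through the
 * regular RQ mkey, or by a "fragmented" set of order-0 pages exposed to the
 * device through a UMR mkey. The per-WQE callbacks chosen at allocation time
 * (dma_pre_sync, add_skb_frag, copy_skb_header, free_wqe) hide that
 * difference from the completion path.
 */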
static inline void
mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev,
                                struct mlx5e_mpw_info *wi,
                                u32 wqe_offset, u32 len)
{
        dma_sync_single_for_cpu(pdev, wi->dma_info.addr + wqe_offset,
                                len, DMA_FROM_DEVICE);
}

static inline void
mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev,
                                    struct mlx5e_mpw_info *wi,
                                    u32 wqe_offset, u32 len)
{
        /* No dma pre sync for fragmented MPWQE */
}

static inline void
mlx5e_add_skb_frag_linear_mpwqe(struct device *pdev,
                                struct sk_buff *skb,
                                struct mlx5e_mpw_info *wi,
                                u32 page_idx, u32 frag_offset,
                                u32 len)
{
        unsigned int truesize = ALIGN(len, MLX5_MPWRQ_STRIDE_SIZE);

        wi->skbs_frags[page_idx]++;
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                        &wi->dma_info.page[page_idx], frag_offset,
                        len, truesize);
}

static inline void
mlx5e_add_skb_frag_fragmented_mpwqe(struct device *pdev,
                                    struct sk_buff *skb,
                                    struct mlx5e_mpw_info *wi,
                                    u32 page_idx, u32 frag_offset,
                                    u32 len)
{
        unsigned int truesize = ALIGN(len, MLX5_MPWRQ_STRIDE_SIZE);

        dma_sync_single_for_cpu(pdev,
                                wi->umr.dma_info[page_idx].addr + frag_offset,
                                len, DMA_FROM_DEVICE);
        wi->skbs_frags[page_idx]++;
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                        wi->umr.dma_info[page_idx].page, frag_offset,
                        len, truesize);
}

static inline void
mlx5e_copy_skb_header_linear_mpwqe(struct device *pdev,
                                   struct sk_buff *skb,
                                   struct mlx5e_mpw_info *wi,
                                   u32 page_idx, u32 offset,
                                   u32 headlen)
{
        struct page *page = &wi->dma_info.page[page_idx];

        skb_copy_to_linear_data(skb, page_address(page) + offset,
                                ALIGN(headlen, sizeof(long)));
}

static inline void
mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
                                       struct sk_buff *skb,
                                       struct mlx5e_mpw_info *wi,
                                       u32 page_idx, u32 offset,
                                       u32 headlen)
{
        u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
        struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
        unsigned int len;

        /* Aligning len to sizeof(long) optimizes memcpy performance */
        len = ALIGN(headlen_pg, sizeof(long));
        dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
                                DMA_FROM_DEVICE);
        skb_copy_to_linear_data_offset(skb, 0,
                                       page_address(dma_info->page) + offset,
                                       len);
#if (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD >= MLX5_MPWRQ_STRIDE_SIZE)
        if (unlikely(offset + headlen > PAGE_SIZE)) {
                dma_info++;
                headlen_pg = len;
                len = ALIGN(headlen - headlen_pg, sizeof(long));
                dma_sync_single_for_cpu(pdev, dma_info->addr, len,
                                        DMA_FROM_DEVICE);
                skb_copy_to_linear_data_offset(skb, headlen_pg,
                                               page_address(dma_info->page),
                                               len);
        }
#endif
}

static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
{
        return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
                wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}

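/* Build the UMR control WQE that maps this RX WQE's pages into
 * rq->umr_mkey_be at the MTT offset reserved for (rq->ix, ix).
 */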
static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
                                struct mlx5e_sq *sq,
                                struct mlx5e_umr_wqe *wqe,
                                u16 ix)
{
        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
        struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
        struct mlx5_wqe_data_seg *dseg = &wqe->data;
        struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
        u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
        u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);

        memset(wqe, 0, sizeof(*wqe));
        cseg->opmod_idx_opcode =
                cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
                            MLX5_OPCODE_UMR);
        cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
                                   ds_cnt);
        cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
        cseg->imm = rq->umr_mkey_be;

        ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
        ucseg->klm_octowords =
                cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
        ucseg->bsf_octowords =
                cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
        ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);

        dseg->lkey = sq->mkey_be;
        dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}

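/* Post the UMR WQE on the channel's internal control SQ (icosq), padding the
 * queue edge with NOPs so the multi-WQEBB UMR never wraps around.
 */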
static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
{
        struct mlx5e_sq *sq = &rq->channel->icosq;
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_umr_wqe *wqe;
        u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
        u16 pi;

        /* fill sq edge with nops to avoid wqe wrap around */
        while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
                sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
                sq->ico_wqe_info[pi].num_wqebbs = 1;
                mlx5e_send_nop(sq, true);
        }

        wqe = mlx5_wq_cyc_get_wqe(wq, pi);
        mlx5e_build_umr_wqe(rq, sq, wqe, ix);
        sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR;
        sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs;
        sq->pc += num_wqebbs;
        mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}

static inline int mlx5e_get_wqe_mtt_sz(void)
{
        /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
         * To avoid copying garbage after the mtt array, we allocate
         * a little more.
         */
        return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
                     MLX5_UMR_MTT_ALIGNMENT);
}

static int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq,
                                    struct mlx5e_mpw_info *wi,
                                    int i)
{
        struct page *page;

        page = dev_alloc_page();
        if (unlikely(!page))
                return -ENOMEM;

        wi->umr.dma_info[i].page = page;
        wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
                                                PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) {
                put_page(page);
                return -ENOMEM;
        }
        wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR);

        return 0;
}

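/* Fragmented MPWQE allocation: gather MLX5_MPWRQ_PAGES_PER_WQE order-0 pages
 * and prepare the MTT array that a subsequent UMR WQE will use to map them
 * under rq->umr_mkey_be.
 */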
static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
                                           struct mlx5e_rx_wqe *wqe,
                                           u16 ix)
{
        struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
        u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
        int i;

        wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
                                   MLX5_MPWRQ_PAGES_PER_WQE,
                                   GFP_ATOMIC);
        if (unlikely(!wi->umr.dma_info))
                goto err_out;

        /* We allocate more than mtt_sz as we will align the pointer */
        wi->umr.mtt_no_align = kzalloc(mtt_sz + MLX5_UMR_ALIGN - 1,
                                       GFP_ATOMIC);
        if (unlikely(!wi->umr.mtt_no_align))
                goto err_free_umr;

        wi->umr.mtt = PTR_ALIGN(wi->umr.mtt_no_align, MLX5_UMR_ALIGN);
        wi->umr.mtt_addr = dma_map_single(rq->pdev, wi->umr.mtt, mtt_sz,
                                          PCI_DMA_TODEVICE);
        if (unlikely(dma_mapping_error(rq->pdev, wi->umr.mtt_addr)))
                goto err_free_mtt;

        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
                if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
                        goto err_unmap;
                atomic_add(MLX5_MPWRQ_STRIDES_PER_PAGE,
                           &wi->umr.dma_info[i].page->_count);
                wi->skbs_frags[i] = 0;
        }

        wi->consumed_strides = 0;
        wi->dma_pre_sync = mlx5e_dma_pre_sync_fragmented_mpwqe;
        wi->add_skb_frag = mlx5e_add_skb_frag_fragmented_mpwqe;
        wi->copy_skb_header = mlx5e_copy_skb_header_fragmented_mpwqe;
        wi->free_wqe = mlx5e_free_rx_fragmented_mpwqe;
        wqe->data.lkey = rq->umr_mkey_be;
        wqe->data.addr = cpu_to_be64(dma_offset);

        return 0;

err_unmap:
        while (--i >= 0) {
                dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
                atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE,
                           &wi->umr.dma_info[i].page->_count);
                put_page(wi->umr.dma_info[i].page);
        }
        dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);

err_free_mtt:
        kfree(wi->umr.mtt_no_align);

err_free_umr:
        kfree(wi->umr.dma_info);

err_out:
        return -ENOMEM;
}

void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
                                    struct mlx5e_mpw_info *wi)
{
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
        int i;

        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
                dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
                atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i],
                           &wi->umr.dma_info[i].page->_count);
                put_page(wi->umr.dma_info[i].page);
        }
        dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
        kfree(wi->umr.mtt_no_align);
        kfree(wi->umr.dma_info);
}

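/* Completion of the UMR WQE for a fragmented MPWQE: the RX WQE can now be
 * pushed to the device and normal posting resumed.
 */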
void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->wq;
        struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

        clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
        mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
        rq->stats.mpwqe_frag++;

        /* ensure wqes are visible to device before updating doorbell record */
        dma_wmb();

        mlx5_wq_ll_update_db_record(wq);
}

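/* Linear MPWQE allocation: a single high-order page backs the whole WQE; it
 * is split into order-0 pages so each fragment's refcount can be managed
 * separately.
 */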
static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
                                       struct mlx5e_rx_wqe *wqe,
                                       u16 ix)
{
        struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
        gfp_t gfp_mask;
        int i;

        gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
        wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
                                             MLX5_MPWRQ_WQE_PAGE_ORDER);
        if (unlikely(!wi->dma_info.page))
                return -ENOMEM;

        wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
                                         rq->wqe_sz, PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
                put_page(wi->dma_info.page);
                return -ENOMEM;
        }

        /* We split the high-order page into order-0 ones and manage their
         * reference counter to minimize the memory held by small skb fragments
         */
        split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
                atomic_add(MLX5_MPWRQ_STRIDES_PER_PAGE,
                           &wi->dma_info.page[i]._count);
                wi->skbs_frags[i] = 0;
        }

        wi->consumed_strides = 0;
        wi->dma_pre_sync = mlx5e_dma_pre_sync_linear_mpwqe;
        wi->add_skb_frag = mlx5e_add_skb_frag_linear_mpwqe;
        wi->copy_skb_header = mlx5e_copy_skb_header_linear_mpwqe;
        wi->free_wqe = mlx5e_free_rx_linear_mpwqe;
        wqe->data.lkey = rq->mkey_be;
        wqe->data.addr = cpu_to_be64(wi->dma_info.addr);

        return 0;
}

void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
                                struct mlx5e_mpw_info *wi)
{
        int i;

        dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
                       PCI_DMA_FROMDEVICE);
        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
                atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i],
                           &wi->dma_info.page[i]._count);
                put_page(&wi->dma_info.page[i]);
        }
}

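/* Prefer the cheap linear scheme; fall back to the UMR-based fragmented
 * scheme when the high-order allocation fails. In the fragmented case the
 * WQE is usable only after the UMR completes, so -EBUSY stops further
 * posting for now.
 */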
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
        int err;

        err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix);
        if (unlikely(err)) {
                err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix);
                if (unlikely(err))
                        return err;
                set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
                mlx5e_post_umr_wqe(rq, ix);
                return -EBUSY;
        }

        return 0;
}

#define RQ_CANNOT_POST(rq) \
        (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
         test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))

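/* Refill the RQ. Returns true if the ring is still not full, i.e. there is
 * more posting work left for the next NAPI round.
 */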
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->wq;

        if (unlikely(RQ_CANNOT_POST(rq)))
                return false;

        while (!mlx5_wq_ll_is_full(wq)) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

                if (unlikely(rq->alloc_wqe(rq, wqe, wq->head)))
                        break;

                mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
        }

        /* ensure wqes are visible to device before updating doorbell record */
        dma_wmb();

        mlx5_wq_ll_update_db_record(wq);

        return !mlx5_wq_ll_is_full(wq);
}

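/* Rewrite the TCP/IP headers of an LRO-aggregated skb from the CQE fields
 * (total length, PSH/ACK bits, ACK sequence, window, TTL) so the merged
 * packet carries consistent headers before it is handed to the stack.
 */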
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
                                 u32 cqe_bcnt)
{
        struct ethhdr *eth = (struct ethhdr *)(skb->data);
        struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
        struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
        struct tcphdr *tcp;

        u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
        int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
                       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

        u16 tot_len = cqe_bcnt - ETH_HLEN;

        if (eth->h_proto == htons(ETH_P_IP)) {
                tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
                                        sizeof(struct iphdr));
                ipv6 = NULL;
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        } else {
                tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
                                        sizeof(struct ipv6hdr));
                ipv4 = NULL;
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
        }

        if (get_cqe_lro_tcppsh(cqe))
                tcp->psh = 1;

        if (tcp_ack) {
                tcp->ack = 1;
                tcp->ack_seq = cqe->lro_ack_seq_num;
                tcp->window = cqe->lro_tcp_win;
        }

        if (ipv4) {
                ipv4->ttl = cqe->lro_min_ttl;
                ipv4->tot_len = cpu_to_be16(tot_len);
                ipv4->check = 0;
                ipv4->check = ip_fast_csum((unsigned char *)ipv4,
                                           ipv4->ihl);
        } else {
                ipv6->hop_limit = cqe->lro_min_ttl;
                ipv6->payload_len = cpu_to_be16(tot_len -
                                                sizeof(struct ipv6hdr));
        }
}

static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
                                      struct sk_buff *skb)
{
        u8 cht = cqe->rss_hash_type;
        int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
                 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
                                            PKT_HASH_TYPE_NONE;
        skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}

static inline bool is_first_ethertype_ip(struct sk_buff *skb)
{
        __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;

        return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
}

static inline void mlx5e_handle_csum(struct net_device *netdev,
                                     struct mlx5_cqe64 *cqe,
                                     struct mlx5e_rq *rq,
                                     struct sk_buff *skb,
                                     bool lro)
{
        if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
                goto csum_none;

        if (lro) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if (likely(is_first_ethertype_ip(skb))) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
                rq->stats.csum_sw++;
        } else {
                goto csum_none;
        }

        return;

csum_none:
        skb->ip_summed = CHECKSUM_NONE;
        rq->stats.csum_none++;
}

static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                                      u32 cqe_bcnt,
                                      struct mlx5e_rq *rq,
                                      struct sk_buff *skb)
{
        struct net_device *netdev = rq->netdev;
        struct mlx5e_tstamp *tstamp = rq->tstamp;
        int lro_num_seg;

        lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
        if (lro_num_seg > 1) {
                mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
                skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
                rq->stats.lro_packets++;
                rq->stats.lro_bytes += cqe_bcnt;
        }

        if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
                mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));

        skb_record_rx_queue(skb, rq->ix);

        if (likely(netdev->features & NETIF_F_RXHASH))
                mlx5e_skb_set_hash(cqe, skb);

        if (cqe_has_vlan(cqe))
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       be16_to_cpu(cqe->vlan_info));

        skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;

        mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
        skb->protocol = eth_type_trans(skb, netdev);
}

static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
                                         struct mlx5_cqe64 *cqe,
                                         u32 cqe_bcnt,
                                         struct sk_buff *skb)
{
        rq->stats.packets++;
        rq->stats.bytes += cqe_bcnt;
        mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
        napi_gro_receive(rq->cq.napi, skb);
}

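/* Completion handler for the regular RQ: one CQE corresponds to one posted
 * skb, which is unmapped, trimmed to the received byte count and passed up
 * through GRO.
 */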
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        struct mlx5e_rx_wqe *wqe;
        struct sk_buff *skb;
        __be16 wqe_counter_be;
        u16 wqe_counter;
        u32 cqe_bcnt;

        wqe_counter_be = cqe->wqe_counter;
        wqe_counter = be16_to_cpu(wqe_counter_be);
        wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
        skb = rq->skb[wqe_counter];
        prefetch(skb->data);
        rq->skb[wqe_counter] = NULL;

        dma_unmap_single(rq->pdev,
                         *((dma_addr_t *)skb->cb),
                         rq->wqe_sz,
                         DMA_FROM_DEVICE);

        if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
                rq->stats.wqe_err++;
                dev_kfree_skb(skb);
                goto wq_ll_pop;
        }

        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
        skb_put(skb, cqe_bcnt);

        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

wq_ll_pop:
        mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
                       &wqe->next.next_wqe_index);
}

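/* Assemble an skb for one MPWQE packet: up to
 * MLX5_MPWRQ_SMALL_PACKET_THRESHOLD bytes are copied into the linear part,
 * the remainder is attached as page fragments.
 */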
static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
                                           struct mlx5_cqe64 *cqe,
                                           struct mlx5e_mpw_info *wi,
                                           u32 cqe_bcnt,
                                           struct sk_buff *skb)
{
        u32 consumed_bytes = ALIGN(cqe_bcnt, MLX5_MPWRQ_STRIDE_SIZE);
        u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
        u32 wqe_offset = stride_ix * MLX5_MPWRQ_STRIDE_SIZE;
        u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
        u32 page_idx = wqe_offset >> PAGE_SHIFT;
        u32 head_page_idx = page_idx;
        u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
        u32 frag_offset = head_offset + headlen;
        u16 byte_cnt = cqe_bcnt - headlen;

#if (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD >= MLX5_MPWRQ_STRIDE_SIZE)
        if (unlikely(frag_offset >= PAGE_SIZE)) {
                page_idx++;
                frag_offset -= PAGE_SIZE;
        }
#endif
        wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes);

        while (byte_cnt) {
                u32 pg_consumed_bytes =
                        min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);

                wi->add_skb_frag(rq->pdev, skb, wi, page_idx, frag_offset,
                                 pg_consumed_bytes);
                byte_cnt -= pg_consumed_bytes;
                frag_offset = 0;
                page_idx++;
        }
        /* copy header */
        wi->copy_skb_header(rq->pdev, skb, wi, head_page_idx, head_offset,
                            headlen);
        /* skb linear part was allocated with headlen and aligned to long */
        skb->tail += headlen;
        skb->len += headlen;
}

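/* Completion handler for the striding RQ: multiple packets (and filler CQEs)
 * share one multi-packet WQE; the WQE is freed and popped only once all of
 * its strides have been consumed.
 */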
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
        u16 wqe_id = be16_to_cpu(cqe->wqe_id);
        struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
        struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
        struct sk_buff *skb;
        u16 cqe_bcnt;

        wi->consumed_strides += cstrides;

        if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
                rq->stats.wqe_err++;
                goto mpwrq_cqe_out;
        }

        if (unlikely(mpwrq_is_filler_cqe(cqe))) {
                rq->stats.mpwqe_filler++;
                goto mpwrq_cqe_out;
        }

        skb = napi_alloc_skb(rq->cq.napi,
                             ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
                                   sizeof(long)));
        if (unlikely(!skb))
                goto mpwrq_cqe_out;

        prefetch(skb->data);
        cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);

        mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

mpwrq_cqe_out:
        if (likely(wi->consumed_strides < MLX5_MPWRQ_NUM_STRIDES))
                return;

        wi->free_wqe(rq, wi);
        mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

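/* NAPI poll for the RX CQ: process up to @budget completions, then update
 * the CQ doorbell record once.
 */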
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
        int work_done;

        for (work_done = 0; work_done < budget; work_done++) {
                struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);

                if (!cqe)
                        break;

                mlx5_cqwq_pop(&cq->wq);

                rq->handle_rx_cqe(rq, cqe);
        }

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        return work_done;
}