
Commit 7957837

khalidm96 authored and Saeed Mahameed committed
net/mlx5e: Fix broken SKB allocation in HW-GRO
In case the HW doesn't perform header-data split, it will write the whole
packet into the data buffer in the WQ. In this case the SHAMPO CQE handler
cannot use the header entry to build the SKB; instead it should allocate
new memory to build the SKB, using the function
mlx5e_skb_from_cqe_mpwrq_nonlinear.

Fixes: f97d5c2 ("net/mlx5e: Add handle SHAMPO cqe support")
Signed-off-by: Khalid Manaa <[email protected]>
Reviewed-by: Tariq Toukan <[email protected]>
Signed-off-by: Saeed Mahameed <[email protected]>
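In short, the fix makes mlx5e_skb_from_cqe_shampo return the SKB it builds
and moves the GRO bookkeeping into the caller, which can then pick the
allocation path per CQE. A condensed sketch of the fixed control flow,
paraphrasing the diff below (not verbatim kernel code):

        if (!*skb) {
                if (likely(head_size))
                        /* HW performed header-data split: build the SKB
                         * around the SHAMPO header entry. */
                        *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
                else
                        /* No header-data split: the whole packet sits in the
                         * data buffer, so allocate the SKB via the nonlinear
                         * MPWQE path instead. */
                        *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt,
                                                                  data_offset, page_idx);
                if (unlikely(!*skb))
                        goto free_hd_entry;

                /* GRO bookkeeping moved here from mlx5e_skb_from_cqe_shampo,
                 * which now only builds and returns the SKB. */
                NAPI_GRO_CB(*skb)->count = 1;
                skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
        }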
1 parent b8d9114 commit 7957837

File tree

  • drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

1 file changed: +17 −9 lines changed


drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

Lines changed: 17 additions & 9 deletions
@@ -1871,7 +1871,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	return skb;
 }
 
-static void
+static struct sk_buff *
 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 			  struct mlx5_cqe64 *cqe, u16 header_index)
 {
@@ -1895,7 +1895,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
 
 	if (unlikely(!skb))
-		return;
+		return NULL;
 
 	/* queue up for recycling/reuse */
 	page_ref_inc(head->page);
@@ -1907,7 +1907,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 			     ALIGN(head_size, sizeof(long)));
 	if (unlikely(!skb)) {
 		rq->stats->buff_alloc_err++;
-		return;
+		return NULL;
 	}
 
 	prefetchw(skb->data);
@@ -1918,9 +1918,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 		skb->tail += head_size;
 		skb->len += head_size;
 	}
-	rq->hw_gro_data->skb = skb;
-	NAPI_GRO_CB(skb)->count = 1;
-	skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size;
+	return skb;
 }
 
 static void
@@ -1980,6 +1978,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
 	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
 	u32 page_idx = wqe_offset >> PAGE_SHIFT;
+	u16 head_size = cqe->shampo.header_size;
 	struct sk_buff **skb = &rq->hw_gro_data->skb;
 	bool flush = cqe->shampo.flush;
 	bool match = cqe->shampo.match;
@@ -2011,9 +2010,16 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	}
 
 	if (!*skb) {
-		mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+		if (likely(head_size))
+			*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+		else
+			*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
+								  page_idx);
 		if (unlikely(!*skb))
 			goto free_hd_entry;
+
+		NAPI_GRO_CB(*skb)->count = 1;
+		skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
 	} else {
 		NAPI_GRO_CB(*skb)->count++;
 		if (NAPI_GRO_CB(*skb)->count == 2 &&
@@ -2027,8 +2033,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 		}
 	}
 
-	di = &wi->umr.dma_info[page_idx];
-	mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+	if (likely(head_size)) {
+		di = &wi->umr.dma_info[page_idx];
+		mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+	}
 
 	mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
 	if (flush)