dpdk/0487-af_xdp-support-tx-multi-buffer.patch


From 27093341fc1f24e540d6c0c2af79c4f5e4dd0d76 Mon Sep 17 00:00:00 2001
From: yangchen <yangchen145@huawei.com>
Date: Mon, 16 Dec 2024 11:36:32 +0800
Subject: [PATCH] af_xdp: support tx multi-buffer

---
drivers/net/af_xdp/rte_eth_af_xdp.c | 53 ++++++++++++++++++++---------
1 file changed, 37 insertions(+), 16 deletions(-)

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 4254a5c..a927fd3 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -532,7 +532,7 @@ pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq)
addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
addr = xsk_umem__extract_addr(addr);
- rte_pktmbuf_free((struct rte_mbuf *)
+ rte_pktmbuf_free_seg((struct rte_mbuf *)
xsk_umem__get_data(umem->buffer,
addr + umem->mb_pool->header_size));
#else
@@ -580,30 +580,50 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
uint64_t addr, offset;
struct xsk_ring_cons *cq = &txq->pair->cq;
uint32_t free_thresh = cq->size >> 1;
+ uint32_t mbuf_len;
+ uint32_t xsk_desc_count = 0;
+ struct rte_mbuf *cur_mbuf;

if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);

for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];

+ cur_mbuf = mbuf;
+ mbuf_len = mbuf->pkt_len;
+
if (mbuf->pool == umem->mb_pool) {
- if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
+ uint32_t nb_reserve = mbuf->nb_segs;
+ uint32_t nb_desc = 0;
+
+ if (!xsk_ring_prod__reserve(&txq->tx, nb_reserve, &idx_tx)) {
kick_tx(txq, cq);
- if (!xsk_ring_prod__reserve(&txq->tx, 1,
- &idx_tx))
+ if (!xsk_ring_prod__reserve(&txq->tx, nb_reserve, &idx_tx))
goto out;
}
- desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
- desc->len = mbuf->pkt_len;
- addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
- umem->mb_pool->header_size;
- offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
- (uint64_t)mbuf +
- umem->mb_pool->header_size;
- offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
- desc->addr = addr | offset;
- count++;
+
+ do {
+ desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + nb_desc++);
+ addr = (uint64_t)cur_mbuf - (uint64_t)umem->buffer - umem->mb_pool->header_size;
+ offset = rte_pktmbuf_mtod(cur_mbuf, uint64_t) - (uint64_t)cur_mbuf + umem->mb_pool->header_size;
+ offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
+ desc->addr = addr | offset;
+
+ if (nb_desc != nb_reserve) {
+ desc->len = cur_mbuf->data_len;
+#if defined(XDP_PKT_CONTD)
+ desc->options = XDP_PKT_CONTD;
+#endif
+ } else {
+ desc->len = cur_mbuf->data_len;
+ desc->options = 0;
+ count++;
+ }
+
+ xsk_desc_count++;
+ cur_mbuf = cur_mbuf->next;
+ } while (cur_mbuf != NULL);
} else {
struct rte_mbuf *local_mbuf =
rte_pktmbuf_alloc(umem->mb_pool);
@@ -632,13 +652,14 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
desc->len);
rte_pktmbuf_free(mbuf);
count++;
+ xsk_desc_count++;
}

- tx_bytes += mbuf->pkt_len;
+ tx_bytes += mbuf_len;
}

out:
- xsk_ring_prod__submit(&txq->tx, count);
+ xsk_ring_prod__submit(&txq->tx, xsk_desc_count);
kick_tx(txq, cq);

txq->stats.tx_pkts += count;
--
2.33.0
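
For reference only, and not part of the patch above: a minimal, self-contained C sketch of the descriptor-chaining scheme the TX hunk implements. One TX descriptor is filled per mbuf segment; every descriptor except the last is marked XDP_PKT_CONTD so the kernel treats the chain as a single multi-buffer frame, and only the final segment completes a packet. The seg struct and the fill_tx_descs() helper below are illustrative stand-ins, not DPDK or libxdp API; struct xdp_desc and XDP_PKT_CONTD mirror the AF_XDP UAPI in <linux/if_xdp.h>.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Descriptor option flag from <linux/if_xdp.h>; defined here only if absent. */
#ifndef XDP_PKT_CONTD
#define XDP_PKT_CONTD (1 << 0)
#endif

/* Mirrors the layout of struct xdp_desc in the AF_XDP UAPI. */
struct xdp_desc {
	uint64_t addr;
	uint32_t len;
	uint32_t options;
};

/* Illustrative stand-in for one segment of a chained rte_mbuf. */
struct seg {
	uint64_t addr;      /* umem address of the segment's data */
	uint32_t data_len;  /* bytes held by this segment */
	struct seg *next;   /* NULL on the last segment */
};

/*
 * Fill one descriptor per segment, as the patch's do/while loop does:
 * all descriptors but the last carry XDP_PKT_CONTD, chaining them into
 * a single multi-buffer frame. Returns the number of descriptors used.
 */
static uint32_t
fill_tx_descs(struct xdp_desc *descs, const struct seg *first)
{
	uint32_t n = 0;
	const struct seg *s;

	for (s = first; s != NULL; s = s->next) {
		descs[n].addr = s->addr;
		descs[n].len = s->data_len;
		descs[n].options = (s->next != NULL) ? XDP_PKT_CONTD : 0;
		n++;
	}
	return n;
}

int main(void)
{
	/* A three-segment packet: only the last descriptor ends the frame. */
	struct seg s2 = { 0x3000, 200,  NULL };
	struct seg s1 = { 0x2000, 1500, &s2 };
	struct seg s0 = { 0x1000, 1500, &s1 };
	struct xdp_desc descs[8];
	uint32_t i, n;

	n = fill_tx_descs(descs, &s0);
	for (i = 0; i < n; i++)
		printf("desc %" PRIu32 ": addr=0x%" PRIx64 " len=%" PRIu32
		       " contd=%" PRIu32 "\n", i, descs[i].addr,
		       descs[i].len, descs[i].options & XDP_PKT_CONTD);
	return 0;
}

This is also why the patch tracks xsk_desc_count separately from count: xsk_ring_prod__submit() must be given the number of descriptors written to the ring, while count keeps counting whole packets for tx_pkts.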