dpdk/0488-af_xdp-support-tx-metadata.patch
jiangheng ede9327d26 af_xdp: support tx metadata
(cherry picked from commit 582e798db5d500bf6cc1542f0070196aac08c46e)
2025-01-22 16:56:43 +08:00

118 lines
3.8 KiB
Diff

From 074ef818772c3770b980f0da0ce500fff452c130 Mon Sep 17 00:00:00 2001
From: jiangheng <jiangheng14@huawei.com>
Date: Sat, 18 Jan 2025 17:19:20 +0800
Subject: [PATCH] af_xdp: support tx metadata
---
drivers/net/af_xdp/rte_eth_af_xdp.c | 62 +++++++++++++++++++++++++++++
1 file changed, 62 insertions(+)
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index a927fd3..3a71a8c 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -39,6 +39,9 @@
#include <rte_spinlock.h>
#include <rte_power_intrinsics.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+
#include "compat.h"
#include "eal_filesystem.h"
@@ -102,6 +105,12 @@ RTE_LOG_REGISTER_DEFAULT(af_xdp_logtype, NOTICE);
#define UDS_FIN_MSG "/fin"
#define UDS_FIN_ACK_MSG "/fin_ack"
+#if defined(XDP_TX_METADATA)
+#define IPV4_TCP_CHECKSUM_OFFLOAD(flag) (((flag) & (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_IPV4)) == \
+ (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_IPV4))
+#define IS_MULTI_BUFFER(flag) (((flag) & RTE_MBUF_F_TX_TCP_SEG) == RTE_MBUF_F_TX_TCP_SEG)
+#endif
+
static int afxdp_dev_count;
/* Message header to synchronize fds via IPC */
@@ -309,6 +318,38 @@ reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
}
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
+#if defined(XDP_TX_METADATA)
+/* only supports TCPv4 currently */
+static void xdp_metadata_checksum_set(struct rte_mbuf *mbuf)
+{
+ void *buff_addr = rte_pktmbuf_mtod(mbuf, char *);
+ struct xsk_tx_metadata *meta = RTE_PTR_SUB(buff_addr, sizeof(struct xsk_tx_metadata));
+ struct rte_ipv4_hdr *ipv4_hdr = RTE_PTR_ADD(buff_addr, mbuf->l2_len);
+ struct rte_tcp_hdr *tcp_hdr = RTE_PTR_ADD(buff_addr, mbuf->l2_len + mbuf->l3_len);
+
+ meta->flags |= XDP_TXMD_FLAGS_CHECKSUM;
+ meta->request.csum_offset = offsetof(struct rte_tcp_hdr, cksum);
+ meta->request.csum_start = mbuf->l2_len + mbuf->l3_len;
+
+ tcp_hdr->cksum = ~rte_ipv4_phdr_cksum(ipv4_hdr, 0);
+}
+
+static void xdp_metadata_tso_set(struct rte_mbuf *mbuf)
+{
+ void *buff_addr = rte_pktmbuf_mtod(mbuf, char *);
+ struct xsk_tx_metadata *meta = RTE_PTR_SUB(buff_addr, sizeof(struct xsk_tx_metadata));
+
+ meta->flags |= XDP_TXMD_FLAGS_TSO;
+ meta->gso.gso_size = mbuf->tso_segsz;
+#define SKB_GSO_TCPV4 (1 << 0)
+ meta->gso.gso_type = SKB_GSO_TCPV4;
+ meta->gso.gso_segs = mbuf->pkt_len / meta->gso.gso_size;
+ if (mbuf->pkt_len % meta->gso.gso_size != 0) {
+ meta->gso.gso_segs += 1;
+ }
+}
+#endif
+
static uint16_t
af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
@@ -583,6 +624,9 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
uint32_t mbuf_len;
uint32_t xsk_desc_count = 0;
struct rte_mbuf *cur_mbuf;
+#if defined(XDP_TX_METADATA)
+ struct rte_mbuf *first_mbuf = NULL;
+#endif
if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
@@ -620,10 +664,28 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
desc->options = 0;
count++;
}
+#if defined(XDP_TX_METADATA)
+ if (first_mbuf == NULL) {
+ first_mbuf = cur_mbuf;
+ desc->options |= XDP_TX_METADATA;
+ struct xsk_tx_metadata *meta = RTE_PTR_SUB(rte_pktmbuf_mtod(first_mbuf, char *),
+ sizeof(struct xsk_tx_metadata));
+ meta->flags = 0;
+ if (IPV4_TCP_CHECKSUM_OFFLOAD(first_mbuf->ol_flags)) {
+ xdp_metadata_checksum_set(first_mbuf);
+ }
+ if (IS_MULTI_BUFFER(first_mbuf->ol_flags)) {
+ xdp_metadata_tso_set(first_mbuf);
+ }
+ }
+#endif
xsk_desc_count++;
cur_mbuf = cur_mbuf->next;
} while (cur_mbuf != NULL);
+#if defined(XDP_TX_METADATA)
+ first_mbuf = NULL;
+#endif
} else {
struct rte_mbuf *local_mbuf =
rte_pktmbuf_alloc(umem->mb_pool);
--
2.33.0