225 lines
6.2 KiB
Diff
From 0829966e018a4db6655618bd0b4cd24479dca5d0 Mon Sep 17 00:00:00 2001
|
|
From: jiangheng <jiangheng14@huawei.com>
|
|
Date: Fri, 27 Sep 2024 15:56:35 +0800
|
|
Subject: [PATCH] net/af_xdp: add interrupt support
|
|
|
|
---
|
|
drivers/net/af_xdp/rte_eth_af_xdp.c | 138 +++++++++++++++++++++++++++-
|
|
1 file changed, 133 insertions(+), 5 deletions(-)
|
|
|
|
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
|
|
index e79a308..9ae3ee9 100644
|
|
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
|
|
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
|
|
@@ -334,7 +334,9 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
|
|
(void)recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
|
|
MSG_DONTWAIT, NULL, NULL);
|
|
} else if (xsk_ring_prod__needs_wakeup(fq)) {
|
|
- (void)poll(&rxq->fds[0], 1, 1000);
|
|
+ if (rxq->fds[0].fd != 0) {
|
|
+ (void)poll(&rxq->fds[0], 1, 1000);
|
|
+ }
|
|
}
|
|
|
|
return 0;
|
|
@@ -409,8 +411,11 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
|
|
nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
|
|
if (nb_pkts == 0) {
|
|
#if defined(XDP_USE_NEED_WAKEUP)
|
|
- if (xsk_ring_prod__needs_wakeup(fq))
|
|
- (void)poll(rxq->fds, 1, 1000);
|
|
+ if (xsk_ring_prod__needs_wakeup(fq)) {
|
|
+ if (rxq->fds[0].fd != 0) {
|
|
+ (void)poll(rxq->fds, 1, 1000);
|
|
+ }
|
|
+ }
|
|
#endif
|
|
return 0;
|
|
}
|
|
@@ -765,6 +770,51 @@ find_internal_resource(struct pmd_internals *port_int)
|
|
return list;
|
|
}
|
|
|
|
+static int xdp_queues_bind_intr(struct rte_eth_dev *dev)
|
|
+{
|
|
+ uint32_t i;
|
|
+
|
|
+ for (i = 0; i < dev->data->nb_rx_queues; ++i) {
|
|
+ if (rte_intr_vec_list_index_set(dev->intr_handle, i, i + 1)) {
|
|
+ return -rte_errno;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int xdp_configure_intr(struct rte_eth_dev *dev)
|
|
+{
|
|
+ struct pmd_internals *internals = dev->data->dev_private;
|
|
+ int ret;
|
|
+
|
|
+ ret = rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues);
|
|
+ if (ret < 0) {
|
|
+ AF_XDP_LOG(ERR, "Failed to enable intr efd\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec", internals->max_queue_cnt);
|
|
+ if (ret < 0) {
|
|
+ AF_XDP_LOG(ERR, "Failed to allocate %u rxq vectors\n", internals->max_queue_cnt);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = rte_intr_enable(dev->intr_handle);
|
|
+ if (ret < 0) {
|
|
+ AF_XDP_LOG(ERR, "Failed to enable interrupt\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = xdp_queues_bind_intr(dev);
|
|
+ if (ret < 0) {
|
|
+ AF_XDP_LOG(ERR, "Failed to bind queue/interrupt\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int
|
|
eth_dev_configure(struct rte_eth_dev *dev)
|
|
{
|
|
@@ -774,6 +824,13 @@ eth_dev_configure(struct rte_eth_dev *dev)
|
|
if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
|
|
return -EINVAL;
|
|
|
|
+ if (dev->data->dev_conf.intr_conf.rxq) {
|
|
+ if (xdp_configure_intr(dev) < 0) {
|
|
+ AF_XDP_LOG(ERR, "Failed to configure interrupt\n");
|
|
+ return -1;
|
|
+ }
|
|
+ }
|
|
+
|
|
if (internal->shared_umem) {
|
|
struct internal_list *list = NULL;
|
|
const char *name = dev->device->name;
|
|
@@ -1823,8 +1880,17 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
|
|
if (!rxq->busy_budget)
|
|
AF_XDP_LOG(DEBUG, "Preferred busy polling not enabled\n");
|
|
|
|
- rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
|
|
- rxq->fds[0].events = POLLIN;
|
|
+ if (dev->data->dev_conf.intr_conf.rxq) {
|
|
+ if (rte_intr_efds_index_set(dev->intr_handle, rx_queue_id, xsk_socket__fd(rxq->xsk))) {
|
|
+ AF_XDP_LOG(ERR, "Failed to set intr efds, queue id: %d\n", rx_queue_id);
|
|
+ ret = -rte_errno;
|
|
+ goto err;
|
|
+ }
|
|
+ rxq->fds[0].fd = 0;
|
|
+ } else {
|
|
+ rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
|
|
+ rxq->fds[0].events = POLLIN;
|
|
+ }
|
|
|
|
process_private->rxq_xsk_fds[rx_queue_id] = rxq->fds[0].fd;
|
|
|
|
@@ -1915,6 +1981,18 @@ eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
|
|
return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
|
|
}
|
|
|
|
/*
 * rx_queue_intr_enable dev op.
 *
 * Intentionally a no-op: the event fd exposed to the application for
 * this queue is the xsk socket fd itself (registered via
 * rte_intr_efds_index_set() in eth_rx_queue_setup()), which is always
 * armed, so there is nothing to switch on per call.
 * NOTE(review): assumes no extra unmasking is needed for AF_XDP
 * sockets — confirm against the kernel AF_XDP wakeup semantics.
 */
static int
eth_dev_rx_queue_intr_enable(__rte_unused struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
	return 0;
}
|
|
+
|
|
/*
 * rx_queue_intr_disable dev op.
 *
 * Intentionally a no-op, mirroring eth_dev_rx_queue_intr_enable():
 * the per-queue xsk socket fd cannot be masked from here, so disabling
 * is left to the application simply not polling the fd.
 * NOTE(review): verify callers tolerate interrupts still firing after
 * "disable" — the ethdev API permits spurious events, but confirm.
 */
static int
eth_dev_rx_queue_intr_disable(__rte_unused struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
	return 0;
}
|
|
+
|
|
static const struct eth_dev_ops ops = {
|
|
.dev_start = eth_dev_start,
|
|
.dev_stop = eth_dev_stop,
|
|
@@ -1930,6 +2008,8 @@ static const struct eth_dev_ops ops = {
|
|
.stats_get = eth_stats_get,
|
|
.stats_reset = eth_stats_reset,
|
|
.get_monitor_addr = eth_get_monitor_addr,
|
|
+ .rx_queue_intr_enable = eth_dev_rx_queue_intr_enable,
|
|
+ .rx_queue_intr_disable = eth_dev_rx_queue_intr_disable,
|
|
};
|
|
|
|
/* AF_XDP Device Plugin option works in unprivileged
|
|
@@ -1951,6 +2031,8 @@ static const struct eth_dev_ops ops_afxdp_dp = {
|
|
.stats_get = eth_stats_get,
|
|
.stats_reset = eth_stats_reset,
|
|
.get_monitor_addr = eth_get_monitor_addr,
|
|
+ .rx_queue_intr_enable = eth_dev_rx_queue_intr_enable,
|
|
+ .rx_queue_intr_disable = eth_dev_rx_queue_intr_disable,
|
|
};
|
|
|
|
/** parse busy_budget argument */
|
|
@@ -2166,6 +2248,47 @@ get_iface_info(const char *if_name,
|
|
return -1;
|
|
}
|
|
|
|
+static int xdp_fill_intr_handle(struct rte_eth_dev *eth_dev)
|
|
+{
|
|
+ struct pmd_internals *internals = eth_dev->data->dev_private;
|
|
+ if (eth_dev->intr_handle == NULL) {
|
|
+ eth_dev->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
|
|
+ if (eth_dev->intr_handle == NULL) {
|
|
+ AF_XDP_LOG(ERR, "Failed to allocate intr_handle\n");
|
|
+ return -1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ for (int i = 0; i < internals->queue_cnt; ++i) {
|
|
+ if (rte_intr_efds_index_set(eth_dev->intr_handle, i, -1)) {
|
|
+ return -rte_errno;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (rte_intr_nb_efd_set(eth_dev->intr_handle, internals->queue_cnt)) {
|
|
+ return -rte_errno;
|
|
+ }
|
|
+
|
|
+ if (rte_intr_max_intr_set(eth_dev->intr_handle, internals->queue_cnt + 1)) {
|
|
+ return -rte_errno;
|
|
+ }
|
|
+
|
|
+ if (rte_intr_type_set(eth_dev->intr_handle, RTE_INTR_HANDLE_VDEV)) {
|
|
+ return -rte_errno;
|
|
+ }
|
|
+
|
|
+ /* For xdp vdev, no need to read counter for clean */
|
|
+ if (rte_intr_efd_counter_size_set(eth_dev->intr_handle, 0)) {
|
|
+ return -rte_errno;
|
|
+ }
|
|
+
|
|
+ if (rte_intr_fd_set(eth_dev->intr_handle, -1)) {
|
|
+ return -rte_errno;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static struct rte_eth_dev *
|
|
init_internals(struct rte_vdev_device *dev, const char *if_name,
|
|
int start_queue_idx, int queue_cnt, int shared_umem,
|
|
@@ -2522,6 +2645,11 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
|
|
}
|
|
afxdp_dev_count++;
|
|
|
|
+ if (xdp_fill_intr_handle(eth_dev) < 0) {
|
|
+ AF_XDP_LOG(ERR, "Failed to init interrupt handler\n");
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
rte_eth_dev_probing_finish(eth_dev);
|
|
|
|
return 0;
|
|
--
|
|
2.33.0
|
|
|