From af8d89a0c6d5cfd4d77ec1ec224e5a72996026df Mon Sep 17 00:00:00 2001
From: Ciara Loftus <ciara.loftus@intel.com>
Date: Thu, 9 Dec 2021 17:19:47 +0000
Subject: [PATCH] net/af_xdp: fix build with -Wunused-function

[ upstream commit af8d89a0c6d5cfd4d77ec1ec224e5a72996026df ]

The get_shared_umem function is only called when the kernel
flag XDP_UMEM_UNALIGNED_CHUNK_FLAG is defined. Move the
function implementation and associated helper so that it only
gets compiled when that flag is set.

Fixes: 74b46340e2d4 ("net/af_xdp: support shared UMEM")
Cc: stable@dpdk.org

Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
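Notes:
    The warning occurs because the only callers of get_shared_umem()
    sit behind the XDP_UMEM_UNALIGNED_CHUNK_FLAG feature test, so when
    that macro is undefined the static definition is never referenced
    and -Wunused-function fires. Moving the definition (and its
    ctx_exists() helper) into the conditionally compiled region removes
    the unreferenced definition from such builds. A minimal sketch of
    the pattern follows; FEATURE_FLAG, helper() and feature_path() are
    hypothetical stand-ins for illustration, not driver code.

    #include <stdio.h>

    #if defined(FEATURE_FLAG)  /* stand-in for XDP_UMEM_UNALIGNED_CHUNK_FLAG */
    /* The helper is defined inside the same guard as its only caller,
     * so a build without FEATURE_FLAG never sees an unused static
     * definition and -Wunused-function stays quiet.
     */
    static int
    helper(int x)
    {
        return x * 2;
    }

    static void
    feature_path(void)
    {
        printf("helper says %d\n", helper(21));
    }
    #endif

    int
    main(void)
    {
    #if defined(FEATURE_FLAG)
        feature_path();
    #endif
        return 0;
    }

    Compiling with "gcc -Wunused-function -Werror" is clean both with
    and without -DFEATURE_FLAG; defining helper() outside the guard
    would warn whenever FEATURE_FLAG is unset.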
 drivers/net/af_xdp/rte_eth_af_xdp.c | 121 ++++++++++++++--------------
 1 file changed, 60 insertions(+), 61 deletions(-)

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 96c2c9d939..b3ed704b36 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -697,67 +697,6 @@ find_internal_resource(struct pmd_internals *port_int)
 	return list;
 }
 
-/* Check if the netdev,qid context already exists */
-static inline bool
-ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
-		struct pkt_rx_queue *list_rxq, const char *list_ifname)
-{
-	bool exists = false;
-
-	if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
-			!strncmp(ifname, list_ifname, IFNAMSIZ)) {
-		AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
-					ifname, rxq->xsk_queue_idx);
-		exists = true;
-	}
-
-	return exists;
-}
-
-/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
-static inline int
-get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
-			struct xsk_umem_info **umem)
-{
-	struct internal_list *list;
-	struct pmd_internals *internals;
-	int i = 0, ret = 0;
-	struct rte_mempool *mb_pool = rxq->mb_pool;
-
-	if (mb_pool == NULL)
-		return ret;
-
-	pthread_mutex_lock(&internal_list_lock);
-
-	TAILQ_FOREACH(list, &internal_list, next) {
-		internals = list->eth_dev->data->dev_private;
-		for (i = 0; i < internals->queue_cnt; i++) {
-			struct pkt_rx_queue *list_rxq =
-						&internals->rx_queues[i];
-			if (rxq == list_rxq)
-				continue;
-			if (mb_pool == internals->rx_queues[i].mb_pool) {
-				if (ctx_exists(rxq, ifname, list_rxq,
-						internals->if_name)) {
-					ret = -1;
-					goto out;
-				}
-				if (__atomic_load_n(
-					&internals->rx_queues[i].umem->refcnt,
-						__ATOMIC_ACQUIRE)) {
-					*umem = internals->rx_queues[i].umem;
-					goto out;
-				}
-			}
-		}
-	}
-
-out:
-	pthread_mutex_unlock(&internal_list_lock);
-
-	return ret;
-}
-
 static int
 eth_dev_configure(struct rte_eth_dev *dev)
 {
@@ -1013,6 +952,66 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
 	return aligned_addr;
 }
 
+/* Check if the netdev,qid context already exists */
+static inline bool
+ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
+		struct pkt_rx_queue *list_rxq, const char *list_ifname)
+{
+	bool exists = false;
+
+	if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
+			!strncmp(ifname, list_ifname, IFNAMSIZ)) {
+		AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
+					ifname, rxq->xsk_queue_idx);
+		exists = true;
+	}
+
+	return exists;
+}
+
+/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
+static inline int
+get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
+			struct xsk_umem_info **umem)
+{
+	struct internal_list *list;
+	struct pmd_internals *internals;
+	int i = 0, ret = 0;
+	struct rte_mempool *mb_pool = rxq->mb_pool;
+
+	if (mb_pool == NULL)
+		return ret;
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(list, &internal_list, next) {
+		internals = list->eth_dev->data->dev_private;
+		for (i = 0; i < internals->queue_cnt; i++) {
+			struct pkt_rx_queue *list_rxq =
+						&internals->rx_queues[i];
+			if (rxq == list_rxq)
+				continue;
+			if (mb_pool == internals->rx_queues[i].mb_pool) {
+				if (ctx_exists(rxq, ifname, list_rxq,
+						internals->if_name)) {
+					ret = -1;
+					goto out;
+				}
+				if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
+						__ATOMIC_ACQUIRE)) {
+					*umem = internals->rx_queues[i].umem;
+					goto out;
+				}
+			}
+		}
+	}
+
+out:
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return ret;
+}
+
 static struct
 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
 				  struct pkt_rx_queue *rxq)
-- 
2.33.0