!1010 [sync] PR-1007: QEMU update to version 6.2.0-100

From: @openeuler-sync-bot 
Reviewed-by: @imxcc 
Signed-off-by: @imxcc
This commit is contained in:
openeuler-ci-bot 2024-10-14 13:52:36 +00:00 committed by Gitee
commit 167d42c4f2
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
5 changed files with 833 additions and 1 deletion

View File: mac_dbdma-Remove-leftover-dma_memory_unmap-calls-CVE.patch

@@ -0,0 +1,71 @@
From da2996d0ea1769719830993991f8d0cedb21ccf1 Mon Sep 17 00:00:00 2001
From: Mattias Nissler <mnissler@rivosinc.com>
Date: Mon, 16 Sep 2024 10:57:08 -0700
Subject: [PATCH] mac_dbdma: Remove leftover `dma_memory_unmap`
calls(CVE-2024-8612)
cherry-pick from 2d0a071e625d7234e8c5623b7e7bf445e1bef72c
These were passing a NULL buffer pointer unconditionally, which happens
to behave in a mostly benign way (except for the chance of an excess
memory region unref and a bounce buffer leak). Per the function comment,
this was never meant to be accepted though, and triggers an assertion
with the "softmmu: Support concurrent bounce buffers" change.
Given that the code in question never sets up any mappings, just remove
the unnecessary dma_memory_unmap calls along with the DBDMA_io struct
fields that are now entirely unused.
Signed-off-by: Mattias Nissler <mnissler@rivosinc.com>
Message-Id: <20240916175708.1829059-1-mnissler@rivosinc.com>
Fixes: be1e343995 ("macio: switch over to new byte-aligned DMA helpers")
Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Tested-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
---
hw/ide/macio.c | 6 ------
include/hw/ppc/mac_dbdma.h | 4 ----
2 files changed, 10 deletions(-)
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index f08318cf97..f23ce15459 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -119,9 +119,6 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
return;
done:
- dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
- io->dir, io->dma_len);
-
if (ret < 0) {
block_acct_failed(blk_get_stats(s->blk), &s->acct);
} else {
@@ -202,9 +199,6 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
return;
done:
- dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
- io->dir, io->dma_len);
-
if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
if (ret < 0) {
block_acct_failed(blk_get_stats(s->blk), &s->acct);
diff --git a/include/hw/ppc/mac_dbdma.h b/include/hw/ppc/mac_dbdma.h
index 4a3f644516..c774f6bf84 100644
--- a/include/hw/ppc/mac_dbdma.h
+++ b/include/hw/ppc/mac_dbdma.h
@@ -44,10 +44,6 @@ struct DBDMA_io {
DBDMA_end dma_end;
/* DMA is in progress, don't start another one */
bool processing;
- /* DMA request */
- void *dma_mem;
- dma_addr_t dma_len;
- DMADirection dir;
};
/*
--
2.45.1.windows.1
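
For context on the assertion mentioned in the commit message above: once the "softmmu: Support concurrent bounce buffers" patch later in this PR is applied, address_space_unmap() only accepts pointers that either resolve to guest RAM or were handed out by address_space_map(). A condensed sketch of that unmap path (paraphrased from the patch below, not verbatim; dirty-bitmap handling omitted) shows why the old unconditional NULL unmaps in hw/ide/macio.c had to go:

    /* Sketch of the post-series address_space_unmap(); a NULL buffer is
     * neither guest RAM nor a BounceBuffer payload, so it reaches the
     * magic check and fails the assertion. */
    void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                             bool is_write, hwaddr access_len)
    {
        ram_addr_t addr1;
        MemoryRegion *mr = memory_region_from_host(buffer, &addr1);

        if (mr != NULL) {
            /* Directly mapped guest RAM: mark dirty if written, unref, done. */
            memory_region_unref(mr);
            return;
        }

        /* Anything else must be the payload of a BounceBuffer that
         * address_space_map() allocated. */
        BounceBuffer *bounce = container_of(buffer, BounceBuffer, buffer);
        assert(bounce->magic == BOUNCE_BUFFER_MAGIC);
        /* ... write back, release the size budget, g_free(bounce) ... */
    }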

View File: qemu.spec

@@ -3,7 +3,7 @@
Name: qemu
Version: 6.2.0
-Release: 99
+Release: 100
Epoch: 10
Summary: QEMU is a generic and open source machine emulator and virtualizer
License: GPLv2 and BSD and MIT and CC-BY-SA-4.0
@@ -1044,6 +1044,11 @@ Patch1029: target-arm-Fix-alignment-for-VLD4.32.patch
Patch1030: hw-net-lan9118-Signal-TSFL_INT-flag-when-TX-FIFO-rea.patch
Patch1031: qtest-fuzz-lsi53c895a-test-set-guest-RAM-to-2G.patch
Patch1032: target-i386-Introduce-SapphireRapids-v3-to-add-missi.patch
Patch1033: system-physmem-Propagate-AddressSpace-to-MapClient-h.patch
Patch1034: system-physmem-Per-AddressSpace-bounce-buffering.patch
Patch1035: softmmu-Support-concurrent-bounce-buffers-CVE-2024-8.patch
Patch1036: mac_dbdma-Remove-leftover-dma_memory_unmap-calls-CVE.patch
BuildRequires: flex
BuildRequires: gcc
@@ -1642,6 +1647,12 @@ getent passwd qemu >/dev/null || \
%endif
%changelog
* Mon Oct 14 2024 <fengjiabo1@huawei.com> - 10:6.2.0-100
- mac_dbdma: Remove leftover `dma_memory_unmap` calls(CVE-2024-8612)
- softmmu: Support concurrent bounce buffers(CVE-2024-8612)
- system/physmem: Per-AddressSpace bounce buffering
- system/physmem: Propagate AddressSpace to MapClient helpers
* Wed Sep 18 2024 <fengjiabo1@huawei.com> - 10:6.2.0-99
- target/i386: Introduce SapphireRapids-v3 to add missing features
- qtest/fuzz-lsi53c895a-test: set guest RAM to 2G

View File: softmmu-Support-concurrent-bounce-buffers-CVE-2024-8.patch

@@ -0,0 +1,284 @@
From 409eea48ae96cce3fea6c42339805be180c86da2 Mon Sep 17 00:00:00 2001
From: Mattias Nissler <mnissler@rivosinc.com>
Date: Mon, 19 Aug 2024 06:54:54 -0700
Subject: [PATCH] softmmu: Support concurrent bounce buffers(CVE-2024-8612)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
cherry-pick from 637b0aa139565cb82a7b9269e62214f87082635c
When DMA memory can't be directly accessed, as is the case when
running the device model in a separate process without shareable DMA
file descriptors, bounce buffering is used.
It is not uncommon for device models to request mapping of several DMA
regions at the same time. Examples include:
* net devices, e.g. when transmitting a packet that is split across
several TX descriptors (observed with igb)
* USB host controllers, when handling a packet with multiple data TRBs
(observed with xhci)
Previously, qemu only provided a single bounce buffer per AddressSpace
and would fail DMA map requests while the buffer was already in use. In
turn, this would cause DMA failures that ultimately manifest as hardware
errors from the guest perspective.
This change allocates DMA bounce buffers dynamically instead of
supporting only a single buffer. Thus, multiple DMA mappings work
correctly also when RAM can't be mmap()-ed.
The total bounce buffer allocation size is limited individually for each
AddressSpace. The default limit is 4096 bytes, matching the previous
maximum buffer size. A new x-max-bounce-buffer-size parameter is
provided to configure the limit for PCI devices.
Signed-off-by: Mattias Nissler <mnissler@rivosinc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Acked-by: Peter Xu <peterx@redhat.com>
Link: https://lore.kernel.org/r/20240819135455.2957406-1-mnissler@rivosinc.com
Signed-off-by: Peter Xu <peterx@redhat.com>
---
hw/pci/pci.c | 8 +++++
include/exec/memory.h | 14 +++-----
include/hw/pci/pci.h | 3 ++
softmmu/memory.c | 5 +--
softmmu/physmem.c | 77 ++++++++++++++++++++++++++++++-------------
5 files changed, 74 insertions(+), 33 deletions(-)
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index 9ea67dba31..df58028b4c 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -79,6 +79,8 @@ static Property pci_props[] = {
DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
failover_pair_id),
DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
+ DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice,
+ max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE),
DEFINE_PROP_END_OF_LIST()
};
@@ -1108,6 +1110,8 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
"bus master container", UINT64_MAX);
address_space_init(&pci_dev->bus_master_as,
&pci_dev->bus_master_container_region, pci_dev->name);
+ pci_dev->bus_master_as.max_bounce_buffer_size =
+ pci_dev->max_bounce_buffer_size;
if (phase_check(PHASE_MACHINE_READY)) {
pci_init_bus_master(pci_dev);
@@ -2696,6 +2700,10 @@ static void pci_device_class_init(ObjectClass *klass, void *data)
k->unrealize = pci_qdev_unrealize;
k->bus_type = TYPE_PCI_BUS;
device_class_set_props(k, pci_props);
+ object_class_property_set_description(
+ klass, "x-max-bounce-buffer-size",
+ "Maximum buffer size allocated for bounce buffers used for mapped "
+ "access to indirect DMA memory");
}
static void pci_device_class_base_init(ObjectClass *klass, void *data)
diff --git a/include/exec/memory.h b/include/exec/memory.h
index f6bde44ecf..2444e0f39d 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -1040,13 +1040,7 @@ typedef struct AddressSpaceMapClient {
QLIST_ENTRY(AddressSpaceMapClient) link;
} AddressSpaceMapClient;
-typedef struct {
- MemoryRegion *mr;
- void *buffer;
- hwaddr addr;
- hwaddr len;
- bool in_use;
-} BounceBuffer;
+#define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096)
/**
* struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
@@ -1065,8 +1059,10 @@ struct AddressSpace {
QTAILQ_HEAD(, MemoryListener) listeners;
QTAILQ_ENTRY(AddressSpace) address_spaces_link;
- /* Bounce buffer to use for this address space. */
- BounceBuffer bounce;
+ /* Maximum DMA bounce buffer size used for indirect memory map requests */
+ size_t max_bounce_buffer_size;
+ /* Total size of bounce buffers currently allocated, atomically accessed */
+ size_t bounce_buffer_size;
/* List of callbacks to invoke when buffers free up */
QemuMutex map_client_list_lock;
QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
index 483d5c7c72..458126a18c 100644
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h
@@ -361,6 +361,9 @@ struct PCIDevice {
/* ID of standby device in net_failover pair */
char *failover_pair_id;
uint32_t acpi_index;
+
+ /* Maximum DMA bounce buffer size used for indirect memory map requests */
+ uint32_t max_bounce_buffer_size;
};
void pci_register_bar(PCIDevice *pci_dev, int region_num,
diff --git a/softmmu/memory.c b/softmmu/memory.c
index 3ab6a58902..0bb49da904 100644
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -2946,7 +2946,8 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
as->ioeventfds = NULL;
QTAILQ_INIT(&as->listeners);
QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
- as->bounce.in_use = false;
+ as->max_bounce_buffer_size = DEFAULT_MAX_BOUNCE_BUFFER_SIZE;
+ as->bounce_buffer_size = 0;
qemu_mutex_init(&as->map_client_list_lock);
QLIST_INIT(&as->map_client_list);
as->name = g_strdup(name ? name : "anonymous");
@@ -2956,7 +2957,7 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
static void do_address_space_destroy(AddressSpace *as)
{
- assert(!qatomic_read(&as->bounce.in_use));
+ assert(qatomic_read(&as->bounce_buffer_size) == 0);
assert(QLIST_EMPTY(&as->map_client_list));
qemu_mutex_destroy(&as->map_client_list_lock);
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 9905d55c67..45d290cde5 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -3081,6 +3081,20 @@ void cpu_flush_icache_range(hwaddr start, hwaddr len)
NULL, len, FLUSH_CACHE);
}
+/*
+ * A magic value stored in the first 8 bytes of the bounce buffer struct. Used
+ * to detect illegal pointers passed to address_space_unmap.
+ */
+#define BOUNCE_BUFFER_MAGIC 0xb4017ceb4ffe12ed
+
+typedef struct {
+ uint64_t magic;
+ MemoryRegion *mr;
+ hwaddr addr;
+ size_t len;
+ uint8_t buffer[];
+} BounceBuffer;
+
static void
address_space_unregister_map_client_do(AddressSpaceMapClient *client)
{
@@ -3106,7 +3120,7 @@ void address_space_register_map_client(AddressSpace *as, QEMUBH *bh)
qemu_mutex_lock(&as->map_client_list_lock);
client->bh = bh;
QLIST_INSERT_HEAD(&as->map_client_list, client, link);
- if (!qatomic_read(&as->bounce.in_use)) {
+ if (qatomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) {
address_space_notify_map_clients_locked(as);
}
qemu_mutex_unlock(&as->map_client_list_lock);
@@ -3237,28 +3251,40 @@ void *address_space_map(AddressSpace *as,
mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
if (!memory_access_is_direct(mr, is_write)) {
- if (qatomic_xchg(&as->bounce.in_use, true)) {
+ size_t used = qatomic_read(&as->bounce_buffer_size);
+ for (;;) {
+ hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l);
+ size_t new_size = used + alloc;
+ size_t actual =
+ qatomic_cmpxchg(&as->bounce_buffer_size, used, new_size);
+ if (actual == used) {
+ l = alloc;
+ break;
+ }
+ used = actual;
+ }
+
+ if (l == 0) {
*plen = 0;
return NULL;
}
- /* Avoid unbounded allocations */
- l = MIN(l, TARGET_PAGE_SIZE);
- as->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
- as->bounce.addr = addr;
- as->bounce.len = l;
+ BounceBuffer *bounce = g_malloc0(l + sizeof(BounceBuffer));
+ bounce->magic = BOUNCE_BUFFER_MAGIC;
memory_region_ref(mr);
- as->bounce.mr = mr;
+ bounce->mr = mr;
+ bounce->addr = addr;
+ bounce->len = l;
+
if (!is_write) {
flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
- as->bounce.buffer, l);
+ bounce->buffer, l);
}
*plen = l;
- return as->bounce.buffer;
+ return bounce->buffer;
}
-
memory_region_ref(mr);
*plen = flatview_extend_translation(fv, addr, len, mr, xlat,
l, is_write, attrs);
@@ -3275,12 +3301,11 @@ void *address_space_map(AddressSpace *as,
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
bool is_write, hwaddr access_len)
{
- if (buffer != as->bounce.buffer) {
- MemoryRegion *mr;
- ram_addr_t addr1;
+ MemoryRegion *mr;
+ ram_addr_t addr1;
- mr = memory_region_from_host(buffer, &addr1);
- assert(mr != NULL);
+ mr = memory_region_from_host(buffer, &addr1);
+ if (mr != NULL) {
if (is_write) {
invalidate_and_set_dirty(mr, addr1, access_len);
}
@@ -3290,14 +3315,22 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
memory_region_unref(mr);
return;
}
+
+
+ BounceBuffer *bounce = container_of(buffer, BounceBuffer, buffer);
+ assert(bounce->magic == BOUNCE_BUFFER_MAGIC);
+
if (is_write) {
- address_space_write(as, as->bounce.addr, MEMTXATTRS_UNSPECIFIED,
- as->bounce.buffer, access_len);
+ address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED,
+ bounce->buffer, access_len);
}
- qemu_vfree(as->bounce.buffer);
- as->bounce.buffer = NULL;
- memory_region_unref(as->bounce.mr);
- qatomic_mb_set(&as->bounce.in_use, false);
+
+ qatomic_sub(&as->bounce_buffer_size, bounce->len);
+ bounce->magic = ~BOUNCE_BUFFER_MAGIC;
+ memory_region_unref(bounce->mr);
+ g_free(bounce);
+ /* Write bounce_buffer_size before reading map_client_list. */
+ smp_mb();
address_space_notify_map_clients(as);
}
--
2.45.1.windows.1
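
To make the new accounting concrete, here is a stand-alone model (illustrative only, not QEMU code) of the reservation loop that address_space_map() now runs against the per-AddressSpace budget. With the default limit of 4096 bytes, a first in-flight mapping that claims the whole budget leaves MIN(4096 - 4096, len) == 0 for a concurrent request, which therefore gets NULL back and retries through the map-client callback once the first buffer is unmapped:

    #include <stdatomic.h>
    #include <stddef.h>

    /* Model of the lock-free budget reservation; 'used' mirrors
     * as->bounce_buffer_size and 'max' mirrors as->max_bounce_buffer_size.
     * Assumes cur <= max, which the caller guarantees. */
    static size_t reserve_bounce(_Atomic size_t *used, size_t max, size_t want)
    {
        size_t cur = atomic_load(used);

        for (;;) {
            size_t alloc = (max - cur) < want ? (max - cur) : want; /* MIN() */

            /* Claim 'alloc' bytes; on failure 'cur' is refreshed with the
             * value another thread just published, and we recompute. */
            if (atomic_compare_exchange_strong(used, &cur, cur + alloc)) {
                return alloc; /* 0 means budget exhausted: caller returns NULL */
            }
        }
    }

On the command line, the per-device budget introduced here would be raised with something like "-device e1000e,x-max-bounce-buffer-size=65536" (device name and value purely illustrative); the property defaults to DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096).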

View File: system-physmem-Per-AddressSpace-bounce-buffering.patch

@@ -0,0 +1,261 @@
From dcab52cf7a81db32d744aae81888a3ed1d3a9efa Mon Sep 17 00:00:00 2001
From: Mattias Nissler <mnissler@rivosinc.com>
Date: Thu, 7 Sep 2023 06:04:23 -0700
Subject: [PATCH] system/physmem: Per-AddressSpace bounce buffering
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
cherry-pick 69e78f1b3484e429274352a464a94fa1d78be339
Instead of using a single global bounce buffer, give each AddressSpace
its own bounce buffer. The MapClient callback mechanism moves to
AddressSpace accordingly.
This is in preparation for generalizing bounce buffer handling further
to allow multiple bounce buffers, with a total allocation limit
configured per AddressSpace.
Reviewed-by: Peter Xu <peterx@redhat.com>
Tested-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Mattias Nissler <mnissler@rivosinc.com>
Message-ID: <20240507094210.300566-2-mnissler@rivosinc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
[PMD: Split patch, part 2/2]
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
include/exec/memory.h | 19 +++++++++++
softmmu/memory.c | 7 ++++
softmmu/physmem.c | 79 ++++++++++++++++---------------------------
3 files changed, 56 insertions(+), 49 deletions(-)
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 859e3da182..f6bde44ecf 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -1035,6 +1035,19 @@ struct MemoryListener {
QTAILQ_ENTRY(MemoryListener) link_as;
};
+typedef struct AddressSpaceMapClient {
+ QEMUBH *bh;
+ QLIST_ENTRY(AddressSpaceMapClient) link;
+} AddressSpaceMapClient;
+
+typedef struct {
+ MemoryRegion *mr;
+ void *buffer;
+ hwaddr addr;
+ hwaddr len;
+ bool in_use;
+} BounceBuffer;
+
/**
* struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
*/
@@ -1051,6 +1064,12 @@ struct AddressSpace {
struct MemoryRegionIoeventfd *ioeventfds;
QTAILQ_HEAD(, MemoryListener) listeners;
QTAILQ_ENTRY(AddressSpace) address_spaces_link;
+
+ /* Bounce buffer to use for this address space. */
+ BounceBuffer bounce;
+ /* List of callbacks to invoke when buffers free up */
+ QemuMutex map_client_list_lock;
+ QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
};
typedef struct AddressSpaceDispatch AddressSpaceDispatch;
diff --git a/softmmu/memory.c b/softmmu/memory.c
index 0bf37f11aa..3ab6a58902 100644
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -2946,6 +2946,9 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
as->ioeventfds = NULL;
QTAILQ_INIT(&as->listeners);
QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
+ as->bounce.in_use = false;
+ qemu_mutex_init(&as->map_client_list_lock);
+ QLIST_INIT(&as->map_client_list);
as->name = g_strdup(name ? name : "anonymous");
address_space_update_topology(as);
address_space_update_ioeventfds(as);
@@ -2953,6 +2956,10 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
static void do_address_space_destroy(AddressSpace *as)
{
+ assert(!qatomic_read(&as->bounce.in_use));
+ assert(QLIST_EMPTY(&as->map_client_list));
+ qemu_mutex_destroy(&as->map_client_list_lock);
+
assert(QTAILQ_EMPTY(&as->listeners));
flatview_unref(as->current_map);
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index b86d7a0cf4..9905d55c67 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -3081,26 +3081,8 @@ void cpu_flush_icache_range(hwaddr start, hwaddr len)
NULL, len, FLUSH_CACHE);
}
-typedef struct {
- MemoryRegion *mr;
- void *buffer;
- hwaddr addr;
- hwaddr len;
- bool in_use;
-} BounceBuffer;
-
-static BounceBuffer bounce;
-
-typedef struct MapClient {
- QEMUBH *bh;
- QLIST_ENTRY(MapClient) link;
-} MapClient;
-
-QemuMutex map_client_list_lock;
-static QLIST_HEAD(, MapClient) map_client_list
- = QLIST_HEAD_INITIALIZER(map_client_list);
-
-static void address_space_unregister_map_client_do(MapClient *client)
+static void
+address_space_unregister_map_client_do(AddressSpaceMapClient *client)
{
QLIST_REMOVE(client, link);
g_free(client);
@@ -3108,10 +3090,10 @@ static void address_space_unregister_map_client_do(MapClient *client)
static void address_space_notify_map_clients_locked(AddressSpace *as)
{
- MapClient *client;
+ AddressSpaceMapClient *client;
- while (!QLIST_EMPTY(&map_client_list)) {
- client = QLIST_FIRST(&map_client_list);
+ while (!QLIST_EMPTY(&as->map_client_list)) {
+ client = QLIST_FIRST(&as->map_client_list);
qemu_bh_schedule(client->bh);
address_space_unregister_map_client_do(client);
}
@@ -3119,15 +3101,15 @@ static void address_space_notify_map_clients_locked(AddressSpace *as)
void address_space_register_map_client(AddressSpace *as, QEMUBH *bh)
{
- MapClient *client = g_malloc(sizeof(*client));
+ AddressSpaceMapClient *client = g_malloc(sizeof(*client));
- qemu_mutex_lock(&map_client_list_lock);
+ qemu_mutex_lock(&as->map_client_list_lock);
client->bh = bh;
- QLIST_INSERT_HEAD(&map_client_list, client, link);
- if (!qatomic_read(&bounce.in_use)) {
+ QLIST_INSERT_HEAD(&as->map_client_list, client, link);
+ if (!qatomic_read(&as->bounce.in_use)) {
address_space_notify_map_clients_locked(as);
}
- qemu_mutex_unlock(&map_client_list_lock);
+ qemu_mutex_unlock(&as->map_client_list_lock);
}
void cpu_exec_init_all(void)
@@ -3143,28 +3125,27 @@ void cpu_exec_init_all(void)
finalize_target_page_bits();
io_mem_init();
memory_map_init();
- qemu_mutex_init(&map_client_list_lock);
}
void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh)
{
- MapClient *client;
+ AddressSpaceMapClient *client;
- qemu_mutex_lock(&map_client_list_lock);
- QLIST_FOREACH(client, &map_client_list, link) {
+ qemu_mutex_lock(&as->map_client_list_lock);
+ QLIST_FOREACH(client, &as->map_client_list, link) {
if (client->bh == bh) {
address_space_unregister_map_client_do(client);
break;
}
}
- qemu_mutex_unlock(&map_client_list_lock);
+ qemu_mutex_unlock(&as->map_client_list_lock);
}
static void address_space_notify_map_clients(AddressSpace *as)
{
- qemu_mutex_lock(&map_client_list_lock);
+ qemu_mutex_lock(&as->map_client_list_lock);
address_space_notify_map_clients_locked(as);
- qemu_mutex_unlock(&map_client_list_lock);
+ qemu_mutex_unlock(&as->map_client_list_lock);
}
static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
@@ -3256,25 +3237,25 @@ void *address_space_map(AddressSpace *as,
mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
if (!memory_access_is_direct(mr, is_write)) {
- if (qatomic_xchg(&bounce.in_use, true)) {
+ if (qatomic_xchg(&as->bounce.in_use, true)) {
*plen = 0;
return NULL;
}
/* Avoid unbounded allocations */
l = MIN(l, TARGET_PAGE_SIZE);
- bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
- bounce.addr = addr;
- bounce.len = l;
+ as->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
+ as->bounce.addr = addr;
+ as->bounce.len = l;
memory_region_ref(mr);
- bounce.mr = mr;
+ as->bounce.mr = mr;
if (!is_write) {
flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
- bounce.buffer, l);
+ as->bounce.buffer, l);
}
*plen = l;
- return bounce.buffer;
+ return as->bounce.buffer;
}
@@ -3294,7 +3275,7 @@ void *address_space_map(AddressSpace *as,
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
bool is_write, hwaddr access_len)
{
- if (buffer != bounce.buffer) {
+ if (buffer != as->bounce.buffer) {
MemoryRegion *mr;
ram_addr_t addr1;
@@ -3310,13 +3291,13 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
return;
}
if (is_write) {
- address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
- bounce.buffer, access_len);
+ address_space_write(as, as->bounce.addr, MEMTXATTRS_UNSPECIFIED,
+ as->bounce.buffer, access_len);
}
- qemu_vfree(bounce.buffer);
- bounce.buffer = NULL;
- memory_region_unref(bounce.mr);
- qatomic_mb_set(&bounce.in_use, false);
+ qemu_vfree(as->bounce.buffer);
+ as->bounce.buffer = NULL;
+ memory_region_unref(as->bounce.mr);
+ qatomic_mb_set(&as->bounce.in_use, false);
address_space_notify_map_clients(as);
}
--
2.45.1.windows.1
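
In practice, the AddressSpace whose bounce state this patch privatizes is, for PCI devices, the per-device bus-master address space, so two devices doing indirect DMA no longer contend on the old global 'bounce' variable. A minimal sketch of the device-model side (illustrative; 'mydev_dma_sketch' is a placeholder name, the helpers are the existing QEMU 6.2 DMA API):

    /* pci_get_address_space() returns &pdev->bus_master_as, which now owns
     * its own BounceBuffer and map-client list. */
    static void mydev_dma_sketch(PCIDevice *pdev, dma_addr_t addr, dma_addr_t len)
    {
        AddressSpace *as = pci_get_address_space(pdev);
        dma_addr_t plen = len;

        /* Takes the bounce-buffer path when 'addr' is not directly
         * accessible (e.g. MMIO, or RAM that cannot be mmap()-ed). */
        void *buf = dma_memory_map(as, addr, &plen, DMA_DIRECTION_TO_DEVICE);
        if (!buf) {
            return; /* this AddressSpace's bounce buffer is currently in use */
        }
        /* ... consume buf[0..plen) ... */
        dma_memory_unmap(as, buf, plen, DMA_DIRECTION_TO_DEVICE, plen);
    }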

View File: system-physmem-Propagate-AddressSpace-to-MapClient-h.patch

@@ -0,0 +1,205 @@
From 055bd1a398060bb44aa13a1b732df843f759d5e4 Mon Sep 17 00:00:00 2001
From: Mattias Nissler <mnissler@rivosinc.com>
Date: Thu, 7 Sep 2023 06:04:23 -0700
Subject: [PATCH] system/physmem: Propagate AddressSpace to MapClient helpers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
cherry-pick from 5c62719710bab66a98f68ebdba333e2240ed6668
Propagate AddressSpace handler to following helpers:
- register_map_client()
- unregister_map_client()
- notify_map_clients[_locked]()
Rename them using 'address_space_' prefix instead of 'cpu_'.
The AddressSpace argument will be used in the next commit.
Reviewed-by: Peter Xu <peterx@redhat.com>
Tested-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Mattias Nissler <mnissler@rivosinc.com>
Message-ID: <20240507094210.300566-2-mnissler@rivosinc.com>
[PMD: Split patch, part 1/2]
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
include/exec/cpu-common.h | 2 --
include/exec/memory.h | 26 ++++++++++++++++++++++++--
softmmu/dma-helpers.c | 4 ++--
softmmu/physmem.c | 24 ++++++++++++------------
4 files changed, 38 insertions(+), 18 deletions(-)
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index cdee668f20..2a3050f553 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -87,8 +87,6 @@ void *cpu_physical_memory_map(hwaddr addr,
bool is_write);
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
bool is_write, hwaddr access_len);
-void cpu_register_map_client(QEMUBH *bh);
-void cpu_unregister_map_client(QEMUBH *bh);
bool cpu_physical_memory_is_io(hwaddr phys_addr);
diff --git a/include/exec/memory.h b/include/exec/memory.h
index bbf1468d59..859e3da182 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -2772,8 +2772,8 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
* May return %NULL and set *@plen to zero(0), if resources needed to perform
* the mapping are exhausted.
* Use only for reads OR writes - not for read-modify-write operations.
- * Use cpu_register_map_client() to know when retrying the map operation is
- * likely to succeed.
+ * Use address_space_register_map_client() to know when retrying the map
+ * operation is likely to succeed.
*
* @as: #AddressSpace to be accessed
* @addr: address within that address space
@@ -2798,6 +2798,28 @@ void *address_space_map(AddressSpace *as, hwaddr addr,
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
bool is_write, hwaddr access_len);
+/*
+ * address_space_register_map_client: Register a callback to invoke when
+ * resources for address_space_map() are available again.
+ *
+ * address_space_map may fail when there are not enough resources available,
+ * such as when bounce buffer memory would exceed the limit. The callback can
+ * be used to retry the address_space_map operation. Note that the callback
+ * gets automatically removed after firing.
+ *
+ * @as: #AddressSpace to be accessed
+ * @bh: callback to invoke when address_space_map() retry is appropriate
+ */
+void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);
+
+/*
+ * address_space_unregister_map_client: Unregister a callback that has
+ * previously been registered and not fired yet.
+ *
+ * @as: #AddressSpace to be accessed
+ * @bh: callback to unregister
+ */
+void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);
/* Internal functions, part of the implementation of address_space_read. */
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
index c2028b6585..3f664156db 100644
--- a/softmmu/dma-helpers.c
+++ b/softmmu/dma-helpers.c
@@ -165,7 +165,7 @@ static void dma_blk_cb(void *opaque, int ret)
if (dbs->iov.size == 0) {
trace_dma_map_wait(dbs);
dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs);
- cpu_register_map_client(dbs->bh);
+ address_space_register_map_client(dbs->sg->as, dbs->bh);
return;
}
@@ -195,7 +195,7 @@ static void dma_aio_cancel(BlockAIOCB *acb)
}
if (dbs->bh) {
- cpu_unregister_map_client(dbs->bh);
+ address_space_unregister_map_client(dbs->sg->as, dbs->bh);
qemu_bh_delete(dbs->bh);
dbs->bh = NULL;
}
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index e5c3557d54..b86d7a0cf4 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -3100,24 +3100,24 @@ QemuMutex map_client_list_lock;
static QLIST_HEAD(, MapClient) map_client_list
= QLIST_HEAD_INITIALIZER(map_client_list);
-static void cpu_unregister_map_client_do(MapClient *client)
+static void address_space_unregister_map_client_do(MapClient *client)
{
QLIST_REMOVE(client, link);
g_free(client);
}
-static void cpu_notify_map_clients_locked(void)
+static void address_space_notify_map_clients_locked(AddressSpace *as)
{
MapClient *client;
while (!QLIST_EMPTY(&map_client_list)) {
client = QLIST_FIRST(&map_client_list);
qemu_bh_schedule(client->bh);
- cpu_unregister_map_client_do(client);
+ address_space_unregister_map_client_do(client);
}
}
-void cpu_register_map_client(QEMUBH *bh)
+void address_space_register_map_client(AddressSpace *as, QEMUBH *bh)
{
MapClient *client = g_malloc(sizeof(*client));
@@ -3125,7 +3125,7 @@ void cpu_register_map_client(QEMUBH *bh)
client->bh = bh;
QLIST_INSERT_HEAD(&map_client_list, client, link);
if (!qatomic_read(&bounce.in_use)) {
- cpu_notify_map_clients_locked();
+ address_space_notify_map_clients_locked(as);
}
qemu_mutex_unlock(&map_client_list_lock);
}
@@ -3146,24 +3146,24 @@ void cpu_exec_init_all(void)
qemu_mutex_init(&map_client_list_lock);
}
-void cpu_unregister_map_client(QEMUBH *bh)
+void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh)
{
MapClient *client;
qemu_mutex_lock(&map_client_list_lock);
QLIST_FOREACH(client, &map_client_list, link) {
if (client->bh == bh) {
- cpu_unregister_map_client_do(client);
+ address_space_unregister_map_client_do(client);
break;
}
}
qemu_mutex_unlock(&map_client_list_lock);
}
-static void cpu_notify_map_clients(void)
+static void address_space_notify_map_clients(AddressSpace *as)
{
qemu_mutex_lock(&map_client_list_lock);
- cpu_notify_map_clients_locked();
+ address_space_notify_map_clients_locked(as);
qemu_mutex_unlock(&map_client_list_lock);
}
@@ -3231,8 +3231,8 @@ flatview_extend_translation(FlatView *fv, hwaddr addr,
* May map a subset of the requested range, given by and returned in *plen.
* May return NULL if resources needed to perform the mapping are exhausted.
* Use only for reads OR writes - not for read-modify-write operations.
- * Use cpu_register_map_client() to know when retrying the map operation is
- * likely to succeed.
+ * Use address_space_register_map_client() to know when retrying the map
+ * operation is likely to succeed.
*/
void *address_space_map(AddressSpace *as,
hwaddr addr,
@@ -3317,7 +3317,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
bounce.buffer = NULL;
memory_region_unref(bounce.mr);
qatomic_mb_set(&bounce.in_use, false);
- cpu_notify_map_clients();
+ address_space_notify_map_clients(as);
}
void *cpu_physical_memory_map(hwaddr addr,
--
2.45.1.windows.1
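
The dma-helpers hunk above shows the one in-tree consumer that had to be converted; condensed into a hedged sketch (struct and function names below are placeholders, the API calls are the renamed ones from this patch), the map-then-retry pattern looks like this:

    /* Illustrative consumer of address_space_map() plus the map-client API;
     * dma_blk_cb() in softmmu/dma-helpers.c follows the same pattern. */
    typedef struct MyDevState {
        AddressSpace *as;
        QEMUBH *bh;
    } MyDevState;

    static void my_dev_start_dma(MyDevState *s, hwaddr addr, hwaddr len);

    static void my_dev_retry_bh(void *opaque)
    {
        MyDevState *s = opaque;

        qemu_bh_delete(s->bh);
        s->bh = NULL;
        /* Re-issue the mapping; the original addr/len would be kept in the
         * device state in a real implementation. */
    }

    static void my_dev_start_dma(MyDevState *s, hwaddr addr, hwaddr len)
    {
        hwaddr plen = len;
        void *buf = address_space_map(s->as, addr, &plen, false,
                                      MEMTXATTRS_UNSPECIFIED);

        if (!buf) {
            /* Mapping resources (the bounce buffer) are exhausted: ask to be
             * called back when address_space_unmap() frees them. */
            s->bh = aio_bh_new(qemu_get_aio_context(), my_dev_retry_bh, s);
            address_space_register_map_client(s->as, s->bh);
            return;
        }
        /* ... perform the access against buf[0..plen) ... */
        address_space_unmap(s->as, buf, plen, false, plen);
    }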