qemu/vdpa-fix-vdpa-device-migrate-rollback-wrong-when-sus.patch
Jiabo Feng 0dd8f840c7 QEMU update to version 6.2.0-96:
- vdpa: Fix bug where vdpa device migration does not resume after rollback
- block: Parse filenames only when explicitly requested (CVE-2024-4467)
- block: introduce bdrv_open_file_child() helper
- iotests/270: Don't store data-file with json: prefix in image (CVE-2024-4467)
- iotests/244: Don't store data-file with protocol in image (CVE-2024-4467)
- qcow2: Don't open data_file with BDRV_O_NO_IO (CVE-2024-4467)
- qcow2: Do not reopen data_file in invalidate_cache
- hw/intc/arm_gic: Fix deactivation of SPI lines (cherry-picked from 7175a562f157d39725ab396e39c1e8e410d206b3)
- vhost-user: Skip unnecessary duplicated VHOST_USER_SET_LOG_BASE requests
- target/ppc: Split off common embedded TLB init (cherry-picked from 581eea5d656b73c6532109f4ced4c73fd4e5fd47)
- vdpa: fix vdpa device migrate rollback wrong when suspend device failed
- hw/virtio/virtio-pci: Support shadow device for virtio-net/blk/scsi devices

Signed-off-by: Jiabo Feng <fengjiabo1@huawei.com>
(cherry picked from commit ad45062d44e901468eeb8c4ac0729587daaa1e1f)
2024-07-12 09:23:41 +08:00

From 493ef78aebf2aac04e9dbf5d1f21eb0c18763917 Mon Sep 17 00:00:00 2001
From: jiangdongxu <jiangdongxu1@huawei.com>
Date: Sat, 22 Jun 2024 07:02:48 +0000
Subject: [PATCH] vdpa: fix vdpa device migrate rollback wrong when suspend
 device failed

1. Set vdpa->suspended before calling vhost_dev_suspend, so that the vdpa
   device is still resumed when the suspend fails.
2. Use vdev->vhost_started instead of vdpa->started to judge whether the
   device has been started.
3. Use state == RUN_STATE_FINISH_MIGRATE instead of ms->state ==
   MIGRATION_STATUS_ACTIVE to judge whether the VM is migrating, because
   migrate_fd_cancel changes ms->state, which could leave some vdpa devices
   not suspended.

Signed-off-by: jiangdongxu <jiangdongxu1@huawei.com>
---
 hw/virtio/vdpa-dev-mig.c | 83 ++++------------------------------------
 1 file changed, 8 insertions(+), 75 deletions(-)

diff --git a/hw/virtio/vdpa-dev-mig.c b/hw/virtio/vdpa-dev-mig.c
index 679d37b182..23238c9f19 100644
--- a/hw/virtio/vdpa-dev-mig.c
+++ b/hw/virtio/vdpa-dev-mig.c
@@ -136,100 +136,33 @@ free:
 static int vhost_vdpa_device_suspend(VhostVdpaDevice *vdpa)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(vdpa);
-    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
-    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
-    int ret;
 
-    if (!vdpa->started || vdpa->suspended) {
+    if (!vdev->vhost_started || vdpa->suspended) {
         return 0;
     }
 
-    if (!k->set_guest_notifiers) {
-        return -EFAULT;
-    }
-
-    vdpa->started = false;
     vdpa->suspended = true;
 
-    ret = vhost_dev_suspend(&vdpa->dev, vdev, false);
-    if (ret) {
-        goto suspend_fail;
-    }
-
-    ret = k->set_guest_notifiers(qbus->parent, vdpa->dev.nvqs, false);
-    if (ret < 0) {
-        error_report("vhost guest notifier cleanup failed: %d\n", ret);
-        goto set_guest_notifiers_fail;
-    }
-
-    vhost_dev_disable_notifiers(&vdpa->dev, vdev);
-    return ret;
-
-set_guest_notifiers_fail:
-    ret = k->set_guest_notifiers(qbus->parent, vdpa->dev.nvqs, true);
-    if (ret) {
-        error_report("vhost guest notifier restore failed: %d\n", ret);
-    }
-
-suspend_fail:
-    vdpa->suspended = false;
-    vdpa->started = true;
-    return ret;
+    return vhost_dev_suspend(&vdpa->dev, vdev, false);
 }
 
 static int vhost_vdpa_device_resume(VhostVdpaDevice *vdpa)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(vdpa);
-    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
-    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
-    int i, ret;
+    MigrationIncomingState *mis = migration_incoming_get_current();
+    int ret;
 
-    if (vdpa->started || !vdpa->suspended) {
+    if (!vdev->vhost_started ||
+        (!vdpa->suspended && mis->state != RUN_STATE_RESTORE_VM)) {
         return 0;
     }
 
-    if (!k->set_guest_notifiers) {
-        error_report("binding does not support guest notifiers\n");
-        return -ENOSYS;
-    }
-
-    ret = vhost_dev_enable_notifiers(&vdpa->dev, vdev);
+    ret = vhost_dev_resume(&vdpa->dev, vdev, false);
     if (ret < 0) {
-        error_report("Error enabling host notifiers: %d\n", ret);
         return ret;
     }
 
-    ret = k->set_guest_notifiers(qbus->parent, vdpa->dev.nvqs, true);
-    if (ret < 0) {
-        error_report("Error binding guest notifier: %d\n", ret);
-        goto err_host_notifiers;
-    }
-
-    vdpa->dev.acked_features = vdev->guest_features;
-
-    ret = vhost_dev_resume(&vdpa->dev, vdev, false);
-    if (ret < 0) {
-        error_report("Error starting vhost: %d\n", ret);
-        goto err_guest_notifiers;
-    }
-    vdpa->started = true;
     vdpa->suspended = false;
-
-    /*
-     * guest_notifier_mask/pending not used yet, so just unmask
-     * everything here. virtio-pci will do the right thing by
-     * enabling/disabling irqfd.
-     */
-    for (i = 0; i < vdpa->dev.nvqs; i++) {
-        vhost_virtqueue_mask(&vdpa->dev, vdev, i, false);
-    }
-
-    return ret;
-
-err_guest_notifiers:
-    k->set_guest_notifiers(qbus->parent, vdpa->dev.nvqs, false);
-err_host_notifiers:
-    vhost_dev_disable_notifiers(&vdpa->dev, vdev);
     return ret;
 }
 
@@ -254,7 +187,7 @@ static void vdpa_dev_vmstate_change(void *opaque, bool running, RunState state)
     MigrationIncomingState *mis = migration_incoming_get_current();
 
     if (!running) {
-        if (ms->state == MIGRATION_STATUS_ACTIVE || state == RUN_STATE_PAUSED) {
+        if (state == RUN_STATE_FINISH_MIGRATE || state == RUN_STATE_PAUSED) {
             ret = vhost_vdpa_device_suspend(vdpa);
             if (ret) {
                 error_report("suspend vdpa device failed: %d\n", ret);
--
2.41.0.windows.1
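
Note for readers tracing the hunks above: the standalone sketch below models the
ordering the patch establishes. It is a minimal illustration only; VdpaModel,
backend_suspend and backend_resume are hypothetical stand-ins, not QEMU APIs.
It demonstrates that the suspended flag is set before the backend suspend is
attempted, so a later resume still rolls the device back even when the suspend
call failed, and that the resume path also runs while restoring an incoming VM.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the run states referenced in the patch. */
typedef enum {
    RUN_STATE_RUNNING,
    RUN_STATE_PAUSED,
    RUN_STATE_FINISH_MIGRATE,
    RUN_STATE_RESTORE_VM,
} RunState;

/* Minimal model of the vdpa device state the patch manipulates. */
typedef struct {
    bool vhost_started;
    bool suspended;
} VdpaModel;

/* Stand-in for the backend suspend call (vhost_dev_suspend in the patch);
 * returns 0 on success, negative on failure. */
static int backend_suspend(bool fail)
{
    return fail ? -1 : 0;
}

/* Stand-in for the backend resume call (vhost_dev_resume in the patch). */
static int backend_resume(void)
{
    return 0;
}

/* Mirrors the post-patch suspend path: mark the device suspended before
 * calling the backend, so a later resume still rolls it back even if the
 * suspend call itself failed. */
static int model_suspend(VdpaModel *d, bool backend_fails)
{
    if (!d->vhost_started || d->suspended) {
        return 0;
    }
    d->suspended = true;
    return backend_suspend(backend_fails);
}

/* Mirrors the post-patch resume path: resume when the device is marked
 * suspended, or when an incoming VM restore is in progress. */
static int model_resume(VdpaModel *d, RunState incoming_state)
{
    int ret;

    if (!d->vhost_started ||
        (!d->suspended && incoming_state != RUN_STATE_RESTORE_VM)) {
        return 0;
    }
    ret = backend_resume();
    if (ret < 0) {
        return ret;
    }
    d->suspended = false;
    return ret;
}

int main(void)
{
    VdpaModel dev = { .vhost_started = true, .suspended = false };

    /* A failed suspend (e.g. the migration is later cancelled) must still
     * leave the device resumable, which is why suspended is set up front. */
    if (model_suspend(&dev, true) != 0) {
        printf("suspend failed, suspended flag = %d\n", dev.suspended);
    }
    printf("resume -> %d, suspended flag = %d\n",
           model_resume(&dev, RUN_STATE_RUNNING), dev.suspended);
    return 0;
}

Built with any C99 compiler, the program reports that the resume still executes
after the failed suspend, which is the rollback behaviour described in point 1
of the commit message.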