[Cinder] Restore volume snapshots while volume is in use
Hi,

To provide a more enterprise-like snapshot experience (comparable to VMware, Hyper-V, Proxmox), it would be great to be able to revert to a snapshot while the volume is in use. With the "revert-to-snapshot" function, everything needed to support this is already in place. In my opinion, if the storage backend (driver) does not support the operation, it should raise an error (for example, resetting a Ceph RBD volume while a VM is running is not possible). But if the backend does support it, we should not stop the user from doing it, even if the volume is "in-use". One might introduce a force switch to prevent triggering it unintentionally. A frontend component like Skyline could also show additional information, such as the state of the attached instance.

Since the "Detach and attach boot volumes" spec [1] was never fully implemented, there is currently no way to revert an instance to a snapshotted state without completely rebuilding it. As described in the spec draft, this is not always suitable and is at least always cumbersome.

-- Jan

[1] https://specs.openstack.org/openstack/nova-specs/specs/stein/approved/detach...

--- suggested patch -----

https://github.com/jmussmann/cinder/commit/4f971d77a94806e9ebdccab45392058c2...

From 4f971d77a94806e9ebdccab45392058c25005761 Mon Sep 17 00:00:00 2001
From: jmussmann <jmussmann+github@posteo.de>
Date: Thu, 31 Oct 2024 09:13:45 +0100
Subject: [PATCH] add: allow reverting snapshots of volumes in use

---
 cinder/volume/api.py     | 5 +++--
 cinder/volume/manager.py | 4 ++--
 cinder/volume/rpcapi.py  | 4 ++--
 3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/cinder/volume/api.py b/cinder/volume/api.py
index 0e20baff0..9aa160740 100644
--- a/cinder/volume/api.py
+++ b/cinder/volume/api.py
@@ -390,8 +390,9 @@ class API(base.Base):
         """revert a volume to a snapshot"""
         context.authorize(vol_action_policy.REVERT_POLICY,
                           target_obj=volume)
+        volume_state = volume.status
         v_res = volume.update_single_status_where(
-            'reverting', 'available')
+            'reverting', ['available', 'in-use'])
         if not v_res:
             msg = (_("Can't revert volume %(vol_id)s to its latest snapshot "
                      "%(snap_id)s. Volume's status must be 'available'.")
@@ -408,7 +409,7 @@ class API(base.Base):
                        "snap_id": snapshot.id})
             raise exception.InvalidSnapshot(reason=msg)

-        self.volume_rpcapi.revert_to_snapshot(context, volume, snapshot)
+        self.volume_rpcapi.revert_to_snapshot(context, volume, snapshot, volume_state)

     def delete(self,
                context: context.RequestContext,
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 30de5d8c7..18914c073 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -1150,7 +1150,7 @@ class VolumeManager(manager.CleanableManager,
             self.create_snapshot(context, snapshot)
             return snapshot

-    def revert_to_snapshot(self, context, volume, snapshot) -> None:
+    def revert_to_snapshot(self, context, volume, snapshot, original_volume_state) -> None:
         """Revert a volume to a snapshot.

         The process of reverting to snapshot consists of several steps:
@@ -1207,7 +1207,7 @@ class VolumeManager(manager.CleanableManager,
                 LOG.exception(msg, msg_args)

         v_res = volume.update_single_status_where(
-            'available', 'reverting')
+            original_volume_state, 'reverting')
         if not v_res:
             msg_args = {"id": volume.id,
                         "status": 'available'}
diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py
index 58de69c1c..0b36cebdd 100644
--- a/cinder/volume/rpcapi.py
+++ b/cinder/volume/rpcapi.py
@@ -180,11 +180,11 @@ class VolumeAPI(rpc.RPCAPI):
                    volume=volume)

     @rpc.assert_min_rpc_version('3.15')
-    def revert_to_snapshot(self, ctxt, volume, snapshot):
+    def revert_to_snapshot(self, ctxt, volume, snapshot, volume_state):
         version = self._compat_ver('3.15')
         cctxt = self._get_cctxt(volume.service_topic_queue, version)
         cctxt.cast(ctxt, 'revert_to_snapshot', volume=volume,
-                   snapshot=snapshot)
+                   snapshot=snapshot, original_volume_state=volume_state)

     def delete_volume(self,
                       ctxt: context.RequestContext,
--
2.47.0
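
For context, the revert itself is exposed through the volume "revert" action added with Cinder API microversion 3.40. Below is a minimal client-side sketch (assuming python-requests) of how the call would look once the patch is applied; the endpoint, project ID and token are placeholders and not part of the patch:

import requests

# Placeholders; in a real deployment these come from the Keystone catalog
# and an authenticated session.
CINDER_ENDPOINT = "http://controller:8776/v3/<project_id>"
TOKEN = "<keystone-token>"

def revert_volume_to_snapshot(volume_id, snapshot_id):
    """Issue the 'revert' volume action (Cinder API microversion 3.40).

    With the proposed patch the request would also be accepted while the
    volume is 'in-use'; without it, Cinder only accepts 'available'.
    """
    resp = requests.post(
        "%s/volumes/%s/action" % (CINDER_ENDPOINT, volume_id),
        json={"revert": {"snapshot_id": snapshot_id}},
        headers={
            "X-Auth-Token": TOKEN,
            "OpenStack-API-Version": "volume 3.40",
        },
    )
    resp.raise_for_status()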
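
Regarding the force switch mentioned above: one way to keep reverts of attached volumes off by default would be to gate the accepted source statuses on an explicit flag. This is only an illustrative sketch, not part of the posted patch; the helper name and the flag are hypothetical:

def allowed_revert_source_statuses(force=False):
    """Statuses from which a revert may start (hypothetical helper).

    Without force only detached volumes may be reverted; with force=True,
    attached ('in-use') volumes are accepted as well, leaving it to the
    backend driver to refuse the operation if it cannot revert an
    attached volume.
    """
    return ['available', 'in-use'] if force else ['available']

# The api.py hunk above could then pass the result to
# volume.update_single_status_where('reverting',
#                                   allowed_revert_source_statuses(force)).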