Fix snapshot size handling in cross-pool copy/move #1717

Merged · 4 commits · Mar 3, 2025
51 changes: 49 additions & 2 deletions cmd/incusd/migrate_storage_volumes.go
@@ -14,6 +14,7 @@ import (
"github.com/lxc/incus/v6/internal/migration"
localMigration "github.com/lxc/incus/v6/internal/server/migration"
"github.com/lxc/incus/v6/internal/server/operations"
"github.com/lxc/incus/v6/internal/server/project"
"github.com/lxc/incus/v6/internal/server/state"
storagePools "github.com/lxc/incus/v6/internal/server/storage"
storageDrivers "github.com/lxc/incus/v6/internal/server/storage/drivers"
@@ -99,6 +100,40 @@ func (s *migrationSourceWs) DoStorage(state *state.State, projectName string, po
return fmt.Errorf("Failed generating volume migration config: %w", err)
}

dbContentType, err := storagePools.VolumeContentTypeNameToContentType(srcConfig.Volume.ContentType)
if err != nil {
return err
}

contentType, err := storagePools.VolumeDBContentTypeToContentType(dbContentType)
if err != nil {
return err
}

volStorageName := project.StorageVolume(projectName, volName)
vol := pool.GetVolume(storageDrivers.VolumeTypeCustom, contentType, volStorageName, srcConfig.Volume.Config)

var volSize int64

if contentType == storageDrivers.ContentTypeBlock {
err = vol.MountTask(func(mountPath string, op *operations.Operation) error {
volDiskPath, err := pool.Driver().GetVolumeDiskPath(vol)
if err != nil {
return err
}

volSize, err = storageDrivers.BlockDiskSizeBytes(volDiskPath)
if err != nil {
return err
}

return nil
}, nil)
if err != nil {
return err
}
}

// The refresh argument passed to MigrationTypes() is always set
// to false here. The migration source/sender doesn't need to care whether
// or not it's doing a refresh as the migration sink/receiver will know
@@ -114,6 +149,7 @@ func (s *migrationSourceWs) DoStorage(state *state.State, projectName string, po
// Offer to send index header.
indexHeaderVersion := localMigration.IndexHeaderVersion
offerHeader.IndexHeaderVersion = &indexHeaderVersion
offerHeader.VolumeSize = &volSize

// Only send snapshots when requested.
if !s.volumeOnly {
@@ -122,6 +158,14 @@ func (s *migrationSourceWs) DoStorage(state *state.State, projectName string, po

for i := range srcConfig.VolumeSnapshots {
offerHeader.SnapshotNames = append(offerHeader.SnapshotNames, srcConfig.VolumeSnapshots[i].Name)

// Set size for snapshot volume
snapSize, err := storagePools.CalculateVolumeSnapshotSize(projectName, pool, contentType, storageDrivers.VolumeTypeCustom, volName, srcConfig.VolumeSnapshots[i].Name)
if err != nil {
return err
}

srcConfig.VolumeSnapshots[i].Config["size"] = fmt.Sprintf("%d", snapSize)
offerHeader.Snapshots = append(offerHeader.Snapshots, volumeSnapshotToProtobuf(srcConfig.VolumeSnapshots[i]))
}
}
@@ -317,6 +361,7 @@ func (c *migrationSink) DoStorage(state *state.State, projectName string, poolNa
respHeader.SnapshotNames = offerHeader.SnapshotNames
respHeader.Snapshots = offerHeader.Snapshots
respHeader.Refresh = &c.refresh
respHeader.VolumeSize = offerHeader.VolumeSize

// Translate the legacy MigrationSinkArgs to a VolumeTargetArgs suitable for use
// with the new storage layer.
@@ -331,15 +376,16 @@ func (c *migrationSink) DoStorage(state *state.State, projectName string, poolNa
ContentType: req.ContentType,
Refresh: args.Refresh,
RefreshExcludeOlder: args.RefreshExcludeOlder,
VolumeSize: args.VolumeSize,
VolumeOnly: args.VolumeOnly,
}

// A zero length Snapshots slice indicates volume only migration in
// VolumeTargetArgs. So if VolumeOnly was requested, do not populate them.
if !args.VolumeOnly {
volTargetArgs.Snapshots = make([]string, 0, len(args.Snapshots))
volTargetArgs.Snapshots = make([]*migration.Snapshot, 0, len(args.Snapshots))
for _, snap := range args.Snapshots {
volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, *snap.Name)
volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, &migration.Snapshot{Name: snap.Name, LocalConfig: snap.LocalConfig})
}
}

@@ -424,6 +470,7 @@ func (c *migrationSink) DoStorage(state *state.State, projectName string, poolNa
RsyncFeatures: rsyncFeatures,
Snapshots: respHeader.Snapshots,
VolumeOnly: c.volumeOnly,
VolumeSize: *respHeader.VolumeSize,
Refresh: c.refresh,
RefreshExcludeOlder: c.refreshExcludeOlder,
}
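
Note on BlockDiskSizeBytes: the helper itself is not part of this diff. A minimal sketch of what such a function does, assuming the path points at a raw image file or a block device (the real implementation in internal/server/storage/drivers may differ):

import (
	"io"
	"os"
)

// blockDiskSizeBytes returns the byte size of a raw image file or block
// device. Seeking to the end works for both, unlike Stat().Size(), which
// does not report a useful size for block devices.
func blockDiskSizeBytes(path string) (int64, error) {
	f, err := os.Open(path)
	if err != nil {
		return -1, err
	}
	defer f.Close()

	return f.Seek(0, io.SeekEnd)
}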
29 changes: 29 additions & 0 deletions internal/migration/utils.go
@@ -90,3 +90,32 @@ func (m *MigrationHeader) GetBtrfsFeaturesSlice() []string {

return features
}

// GetSnapshotConfigValue retrieves the value associated with the given key from the snapshot LocalConfig.
func GetSnapshotConfigValue(snapshot *Snapshot, key string) string {
var value string
for _, c := range snapshot.LocalConfig {
if c.GetKey() != key {
continue
}

value = c.GetValue()
}

return value
}

// SetSnapshotConfigValue stores the given value for the specified key in the snapshot LocalConfig.
func SetSnapshotConfigValue(snapshot *Snapshot, key string, value string) {
for _, c := range snapshot.LocalConfig {
if c.GetKey() != key {
continue
}

c.Value = &value
return
}

config := Config{Key: &key, Value: &value}
snapshot.LocalConfig = append(snapshot.LocalConfig, &config)
}
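
Usage sketch for the two helpers above (values are illustrative; Snapshot and Config are the generated migration protobuf types, so their scalar fields are pointers):

name := "snap0"
snap := &migration.Snapshot{Name: &name}

// Attach the source-side size, then read it back on the receiving end.
migration.SetSnapshotConfigValue(snap, "size", "10737418240")
_ = migration.GetSnapshotConfigValue(snap, "size") // "10737418240"

Since GetSnapshotConfigValue scans the whole LocalConfig slice without breaking early, a duplicate key resolves to the last value appended; SetSnapshotConfigValue avoids duplicates by updating an existing entry in place.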
4 changes: 2 additions & 2 deletions internal/server/instance/drivers/driver_lxc.go
@@ -6353,9 +6353,9 @@ func (d *lxc) MigrateReceive(args instance.MigrateReceiveArgs) error {
// A zero length Snapshots slice indicates volume only migration in
// VolumeTargetArgs. So if VolumeOnly was requested, do not populate them.
if args.Snapshots {
volTargetArgs.Snapshots = make([]string, 0, len(snapshots))
volTargetArgs.Snapshots = make([]*migration.Snapshot, 0, len(snapshots))
for _, snap := range snapshots {
volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, *snap.Name)
volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, &migration.Snapshot{Name: snap.Name})

// Only create snapshot instance DB records if not doing a cluster same-name move.
// As otherwise the DB records will already exist.
20 changes: 18 additions & 2 deletions internal/server/instance/drivers/driver_qemu.go
@@ -6658,13 +6658,20 @@ func (d *qemu) MigrateSend(args instance.MigrateSendArgs) error {
return err
}

contentType := storagePools.InstanceContentType(d)
// If we are copying snapshots, retrieve a list of snapshots from source volume.
if args.Snapshots {
offerHeader.SnapshotNames = make([]string, 0, len(srcConfig.Snapshots))
offerHeader.Snapshots = make([]*migration.Snapshot, 0, len(srcConfig.Snapshots))

for i := range srcConfig.Snapshots {
offerHeader.SnapshotNames = append(offerHeader.SnapshotNames, srcConfig.Snapshots[i].Name)
snapSize, err := storagePools.CalculateVolumeSnapshotSize(d.Project().Name, pool, contentType, storageDrivers.VolumeTypeVM, d.Name(), srcConfig.Snapshots[i].Name)
if err != nil {
return err
}

srcConfig.Snapshots[i].Config["size"] = fmt.Sprintf("%d", snapSize)
offerHeader.Snapshots = append(offerHeader.Snapshots, instance.SnapshotToProtobuf(srcConfig.Snapshots[i]))
}
}
@@ -7480,9 +7487,12 @@ func (d *qemu) MigrateReceive(args instance.MigrateReceiveArgs) error {
// A zero length Snapshots slice indicates volume only migration in
// VolumeTargetArgs. So if VolumeOnly was requested, do not populate them.
if args.Snapshots {
volTargetArgs.Snapshots = make([]string, 0, len(snapshots))
volTargetArgs.Snapshots = make([]*migration.Snapshot, 0, len(snapshots))
for _, snap := range snapshots {
volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, *snap.Name)
migrationSnapshot := &migration.Snapshot{Name: snap.Name}
migration.SetSnapshotConfigValue(migrationSnapshot, "size", migration.GetSnapshotConfigValue(snap, "size"))

volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, migrationSnapshot)

// Only create snapshot instance DB records if not doing a cluster same-name move.
// As otherwise the DB records will already exist.
@@ -7492,6 +7502,12 @@ func (d *qemu) MigrateReceive(args instance.MigrateReceiveArgs) error {
return err
}

// The offerHeader, depending on the case, stores information about either an InstanceSnapshot
// or a StorageVolumeSnapshot. In the Config, we pass information about the volume size,
// but an InstanceSnapshot config cannot have a 'size' key. This key should be removed
// before passing the data to the CreateInternal method.
delete(snapArgs.Config, "size")

// Ensure that snapshot and parent instance have the same storage pool in
// their local root disk device. If the root disk device for the snapshot
// comes from a profile on the new instance as well we don't need to do
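
Taken together, the receive side follows this flow (a simplified sketch assembled from the hunks above, not verbatim PR code): the size the sender stored in the snapshot's migration config is forwarded to the storage layer, while the instance snapshot's own config is scrubbed of the key.

// snap is a *migration.Snapshot from the offer header.
sizeStr := migration.GetSnapshotConfigValue(snap, "size")

target := &migration.Snapshot{Name: snap.Name}
migration.SetSnapshotConfigValue(target, "size", sizeStr)
volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, target)

// Instance snapshot DB records must not carry a "size" key.
delete(snapArgs.Config, "size")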
2 changes: 1 addition & 1 deletion internal/server/migration/migration_volumes.go
@@ -70,7 +70,7 @@ type VolumeTargetArgs struct {
Name string
Description string
Config map[string]string // Only used for custom volume migration.
Snapshots []string
Snapshots []*migration.Snapshot
MigrationType Type
TrackProgress bool
Refresh bool
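
A sketch of what callers adapt to after this type change: plain snapshot names become full migration.Snapshot messages, which can also carry per-snapshot config such as "size" (names below are illustrative):

names := []string{"snap0", "snap1"}
snaps := make([]*migration.Snapshot, 0, len(names))
for _, n := range names {
	name := n // copy before taking the address
	snaps = append(snaps, &migration.Snapshot{Name: &name})
}

args := localMigration.VolumeTargetArgs{
	Name:      "vol1",
	Snapshots: snaps,
}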
55 changes: 48 additions & 7 deletions internal/server/storage/backend.go
@@ -1165,6 +1165,14 @@ func (b *backend) CreateInstanceFromCopy(inst instance.Instance, src instance.In
}
}

var migrationSnapshots []*migration.Snapshot
if snapshots {
migrationSnapshots, err = VolumeSnapshotsToMigrationSnapshots(srcConfig.VolumeSnapshots, inst.Project().Name, srcPool, contentType, volType, src.Name())
if err != nil {
return err
}
}

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

@@ -1193,7 +1201,7 @@
return b.CreateInstanceFromMigration(inst, bEnd, localMigration.VolumeTargetArgs{
IndexHeaderVersion: localMigration.IndexHeaderVersion,
Name: inst.Name(),
Snapshots: snapshotNames,
Snapshots: migrationSnapshots,
MigrationType: migrationTypes[0],
VolumeSize: srcVolumeSize, // Block size setting override.
TrackProgress: false, // Do not use a progress tracker on receiver.
@@ -1421,6 +1429,14 @@ func (b *backend) RefreshCustomVolume(projectName string, srcProjectName string,
}
}

var migrationSnapshots []*migration.Snapshot
if snapshots {
migrationSnapshots, err = VolumeSnapshotsToMigrationSnapshots(srcConfig.VolumeSnapshots, projectName, srcPool, contentType, drivers.VolumeTypeCustom, srcVolName)
if err != nil {
return err
}
}

ctx, cancel := context.WithCancel(context.Background())

// Use in-memory pipe pair to simulate a connection between the sender and receiver.
@@ -1453,7 +1469,7 @@
Name: volName,
Description: desc,
Config: config,
Snapshots: snapshotNames,
Snapshots: migrationSnapshots,
MigrationType: migrationTypes[0],
TrackProgress: false, // Do not use a progress tracker on receiver.
ContentType: string(contentType),
@@ -1634,6 +1650,20 @@ func (b *backend) RefreshInstance(inst instance.Instance, src instance.Instance,
return fmt.Errorf("Failed to negotiate copy migration type: %w", err)
}

var srcVolumeSize int64
// For VMs, get source volume size so that target can create the volume the same size.
if src.Type() == instancetype.VM {
srcVolumeSize, err = InstanceDiskBlockSize(srcPool, src, op)
if err != nil {
return fmt.Errorf("Failed getting source disk size: %w", err)
}
}

migrationSnapshots, err := VolumeSnapshotsToMigrationSnapshots(srcConfig.VolumeSnapshots, src.Project().Name, srcPool, contentType, volType, src.Name())
if err != nil {
return err
}

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

@@ -1663,9 +1693,10 @@
return b.CreateInstanceFromMigration(inst, bEnd, localMigration.VolumeTargetArgs{
IndexHeaderVersion: localMigration.IndexHeaderVersion,
Name: inst.Name(),
Snapshots: snapshotNames,
Snapshots: migrationSnapshots,
MigrationType: migrationTypes[0],
Refresh: true, // Indicate to receiver volume should exist.
Refresh: true, // Indicate to receiver volume should exist.
VolumeSize: srcVolumeSize,
TrackProgress: false, // Do not use a progress tracker on receiver.
VolumeOnly: !snapshots,
}, op)
@@ -1994,7 +2025,8 @@ func (b *backend) CreateInstanceFromMigration(inst instance.Instance, conn io.Re
// Create new volume database records when the storage pool is changed or
// when it is not a remote cluster move.
if !isRemoteClusterMove || args.StoragePool != "" {
for i, snapName := range args.Snapshots {
for i, snapshot := range args.Snapshots {
snapName := snapshot.GetName()
newSnapshotName := drivers.GetSnapshotVolumeName(inst.Name(), snapName)
snapConfig := vol.Config() // Use parent volume config by default.
snapDescription := volumeDescription // Use parent volume description by default.
@@ -4815,6 +4847,14 @@ func (b *backend) CreateCustomVolumeFromCopy(projectName string, srcProjectName
}
}

var migrationSnapshots []*migration.Snapshot
if snapshots {
migrationSnapshots, err = VolumeSnapshotsToMigrationSnapshots(srcConfig.VolumeSnapshots, srcProjectName, srcPool, contentType, drivers.VolumeTypeCustom, srcVolName)
if err != nil {
return err
}
}

ctx, cancel := context.WithCancel(context.Background())

// Use in-memory pipe pair to simulate a connection between the sender and receiver.
@@ -4848,7 +4888,7 @@
Name: volName,
Description: desc,
Config: config,
Snapshots: snapshotNames,
Snapshots: migrationSnapshots,
MigrationType: migrationTypes[0],
TrackProgress: false, // Do not use a progress tracker on receiver.
ContentType: string(contentType),
@@ -5117,7 +5157,8 @@ func (b *backend) CreateCustomVolumeFromMigration(projectName string, conn io.Re

if len(args.Snapshots) > 0 {
// Create database entries for new storage volume snapshots.
for _, snapName := range args.Snapshots {
for _, snapshot := range args.Snapshots {
snapName := snapshot.GetName()
newSnapshotName := drivers.GetSnapshotVolumeName(args.Name, snapName)

snapConfig := vol.Config() // Use parent volume config by default.
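
VolumeSnapshotsToMigrationSnapshots is introduced elsewhere in this PR and is not shown in this diff. A hypothetical sketch of its shape, inferred from the call sites above and from the per-snapshot sizing loop in migrate_storage_volumes.go (parameter types are assumptions):

// volumeSnapshotsToMigrationSnapshots wraps each database snapshot in a
// migration.Snapshot and records its calculated size in the config.
func volumeSnapshotsToMigrationSnapshots(snaps []*api.StorageVolumeSnapshot, projectName string, pool Pool, contentType drivers.ContentType, volType drivers.VolumeType, volName string) ([]*migration.Snapshot, error) {
	out := make([]*migration.Snapshot, 0, len(snaps))
	for i := range snaps {
		size, err := CalculateVolumeSnapshotSize(projectName, pool, contentType, volType, volName, snaps[i].Name)
		if err != nil {
			return nil, err
		}

		name := snaps[i].Name
		snap := &migration.Snapshot{Name: &name}
		migration.SetSnapshotConfigValue(snap, "size", fmt.Sprintf("%d", size))
		out = append(out, snap)
	}

	return out, nil
}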
12 changes: 6 additions & 6 deletions internal/server/storage/drivers/driver_btrfs_volumes.go
@@ -532,9 +532,9 @@ func (d *btrfs) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, v
d.logger.Debug("Received BTRFS migration meta data header", logger.Ctx{"name": vol.name})
} else {
// Populate the migrationHeader subvolumes with root volumes only to support older sources.
for _, snapName := range volTargetArgs.Snapshots {
for _, snapshot := range volTargetArgs.Snapshots {
migrationHeader.Subvolumes = append(migrationHeader.Subvolumes, BTRFSSubVolume{
Snapshot: snapName,
Snapshot: snapshot.GetName(),
Path: string(filepath.Separator),
Readonly: true, // Snapshots are made readonly.
})
@@ -554,7 +554,7 @@
}

// Reset list of snapshots which are to be received.
volTargetArgs.Snapshots = []string{}
volTargetArgs.Snapshots = []*migration.Snapshot{}

// Map of local subvolumes with their received UUID.
localSubvolumes := make(map[string]string)
@@ -579,7 +579,7 @@
}

if migrationSnap.Path == "/" && migrationSnap.Snapshot != "" {
volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, migrationSnap.Snapshot)
volTargetArgs.Snapshots = append(volTargetArgs.Snapshots, &migration.Snapshot{Name: &migrationSnap.Snapshot})
}

syncSubvolumes = append(syncSubvolumes, BTRFSSubVolume{Path: migrationSnap.Path, Snapshot: migrationSnap.Snapshot, UUID: migrationSnap.UUID})
@@ -702,8 +702,8 @@ func (d *btrfs) createVolumeFromMigrationOptimized(vol Volume, conn io.ReadWrite
revert.Add(func() { _ = deleteParentSnapshotDirIfEmpty(d.name, vol.volType, vol.name) })

// Transfer the snapshots.
for _, snapName := range volTargetArgs.Snapshots {
snapVol, _ := vol.NewSnapshot(snapName)
for _, snapshot := range volTargetArgs.Snapshots {
snapVol, _ := vol.NewSnapshot(snapshot.GetName())
err = receiveVolume(snapVol, tmpVolumesMountPoint)
if err != nil {
return err
6 changes: 3 additions & 3 deletions internal/server/storage/drivers/driver_ceph_volumes.go
@@ -584,16 +584,16 @@ func (d *ceph) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, vo
}

// Transfer the snapshots.
for _, snapName := range volTargetArgs.Snapshots {
fullSnapshotName := d.getRBDVolumeName(vol, snapName, true)
for _, snapshot := range volTargetArgs.Snapshots {
fullSnapshotName := d.getRBDVolumeName(vol, snapshot.GetName(), true)
wrapper := localMigration.ProgressWriter(op, "fs_progress", fullSnapshotName)

err = d.receiveVolume(recvName, conn, wrapper)
if err != nil {
return err
}

snapVol, err := vol.NewSnapshot(snapName)
snapVol, err := vol.NewSnapshot(snapshot.GetName())
if err != nil {
return err
}