@@ -19,6 +19,7 @@ import (
19
19
dbCluster "github.com/lxc/incus/v6/internal/server/db/cluster"
20
20
"github.com/lxc/incus/v6/internal/server/db/operationtype"
21
21
"github.com/lxc/incus/v6/internal/server/instance"
22
+ "github.com/lxc/incus/v6/internal/server/instance/instancetype"
22
23
"github.com/lxc/incus/v6/internal/server/operations"
23
24
"github.com/lxc/incus/v6/internal/server/project"
24
25
"github.com/lxc/incus/v6/internal/server/request"
@@ -234,9 +235,15 @@ func instancePost(d *Daemon, r *http.Request) response.Response {
234
235
return response .BadRequest (fmt .Errorf ("Instance must be stopped to be moved statelessly" ))
235
236
}
236
237
237
- // Storage pool changes require a stopped instance .
238
+ // Storage pool changes require a target flag.
238
239
if req .Pool != "" {
239
- return response .BadRequest (fmt .Errorf ("Instance must be stopped to be moved across storage pools" ))
240
+ if inst .Type () != instancetype .VM {
241
+ return response .BadRequest (fmt .Errorf ("Storage pool change supported only by virtual-machines" ))
242
+ }
243
+
244
+ if target == "" {
245
+ return response .BadRequest (fmt .Errorf ("Storage pool can be specified only together with target flag" ))
246
+ }
240
247
}
241
248
242
249
// Project changes require a stopped instance.
@@ -433,7 +440,7 @@ func instancePost(d *Daemon, r *http.Request) response.Response {
433
440
}
434
441
435
442
// Cross-server instance migration.
436
- ws , err := newMigrationSource (inst , req .Live , req .InstanceOnly , req .AllowInconsistent , "" , req .Target )
443
+ ws , err := newMigrationSource (inst , req .Live , req .InstanceOnly , req .AllowInconsistent , "" , "" , req .Target )
437
444
if err != nil {
438
445
return response .InternalError (err )
439
446
}
@@ -476,6 +483,11 @@ func migrateInstance(ctx context.Context, s *state.State, inst instance.Instance
476
483
return fmt .Errorf ("Failed loading instance storage pool: %w" , err )
477
484
}
478
485
486
+ // Check that we're not requested to move to the same storage pool we currently use.
487
+ if req .Pool != "" && req .Pool == sourcePool .Name () {
488
+ return fmt .Errorf ("Requested storage pool is the same as current pool" )
489
+ }
490
+
479
491
// Get the DB volume type for the instance.
480
492
volType , err := storagePools .InstanceTypeToVolumeType (inst .Type ())
481
493
if err != nil {
@@ -593,8 +605,8 @@ func migrateInstance(ctx context.Context, s *state.State, inst instance.Instance
593
605
req .Name = ""
594
606
}
595
607
596
- // Handle pool and project moves.
597
- if req .Project != "" || req .Pool != "" {
608
+ // Handle pool and project moves for stopped instances.
609
+ if ( req .Project != "" || req .Pool != "" ) && ! req . Live {
598
610
// Get a local client.
599
611
args := & incus.ConnectionArgs {
600
612
SkipGetServer : true ,
@@ -756,7 +768,7 @@ func migrateInstance(ctx context.Context, s *state.State, inst instance.Instance
756
768
req .Project = ""
757
769
}
758
770
759
- // Handle remote migrations (location changes).
771
+ // Handle remote migrations (location and storage pool changes).
760
772
if targetMemberInfo != nil && inst .Location () != targetMemberInfo .Name {
761
773
// Get the client.
762
774
networkCert := s .Endpoints .NetworkCert ()
@@ -794,7 +806,7 @@ func migrateInstance(ctx context.Context, s *state.State, inst instance.Instance
794
806
}
795
807
796
808
// Setup a new migration source.
797
- sourceMigration , err := newMigrationSource (inst , req .Live , false , req .AllowInconsistent , inst .Name (), nil )
809
+ sourceMigration , err := newMigrationSource (inst , req .Live , false , req .AllowInconsistent , inst .Name (), req . Pool , nil )
798
810
if err != nil {
799
811
return fmt .Errorf ("Failed setting up instance migration on source: %w" , err )
800
812
}
@@ -918,8 +930,9 @@ func migrateInstance(ctx context.Context, s *state.State, inst instance.Instance
918
930
return err
919
931
}
920
932
921
- // Cleanup instance paths on source member if using remote shared storage.
922
- if sourcePool .Driver ().Info ().Remote {
933
+ // Cleanup instance paths on source member if using remote shared storage
934
+ // and there was no storage pool change.
935
+ if sourcePool .Driver ().Info ().Remote && req .Pool == "" {
923
936
err = sourcePool .CleanupInstancePaths (inst , nil )
924
937
if err != nil {
925
938
return fmt .Errorf ("Failed cleaning up instance paths on source member: %w" , err )
0 commit comments