@@ -4855,3 +4855,144 @@ resource "google_container_node_pool" "np" {
4855
4855
}
4856
4856
` , cluster , np )
4857
4857
}
4858
+
4859
+ func TestAccContainerNodePool_storagePools (t * testing.T ) {
4860
+ t .Parallel ()
4861
+
4862
+ cluster := fmt .Sprintf ("tf-test-cluster-%s" , acctest .RandString (t , 10 ))
4863
+ np := fmt .Sprintf ("tf-test-nodepool-%s" , acctest .RandString (t , 10 ))
4864
+ networkName := acctest .BootstrapSharedTestNetwork (t , "gke-cluster" )
4865
+ subnetworkName := acctest .BootstrapSubnet (t , "gke-cluster" , networkName )
4866
+ location := envvar .GetTestZoneFromEnv ()
4867
+
4868
+ storagePoolNameURL := acctest .BootstrapComputeStoragePool (t , "basic-1" , "hyperdisk-balanced" )
4869
+ storagePoolResourceName , err := extractSPName (storagePoolNameURL )
4870
+ if err != nil {
4871
+ t .Fatal ("Failed to extract Storage Pool resource name from URL." )
4872
+ }
4873
+
4874
+ acctest .VcrTest (t , resource.TestCase {
4875
+ PreCheck : func () { acctest .AccTestPreCheck (t ) },
4876
+ ProtoV5ProviderFactories : acctest .ProtoV5ProviderFactories (t ),
4877
+ CheckDestroy : testAccCheckContainerNodePoolDestroyProducer (t ),
4878
+ Steps : []resource.TestStep {
4879
+ {
4880
+ Config : testAccContainerNodePool_storagePools (cluster , np , networkName , subnetworkName , storagePoolResourceName , location ),
4881
+ Check : resource .ComposeTestCheckFunc (
4882
+ resource .TestCheckResourceAttr ("google_container_node_pool.np" , "node_config.0.storage_pools.0" , storagePoolResourceName ),
4883
+ ),
4884
+ },
4885
+ {
4886
+ ResourceName : "google_container_node_pool.np" ,
4887
+ ImportState : true ,
4888
+ ImportStateVerify : true ,
4889
+ ImportStateVerifyIgnore : []string {"deletion_protection" },
4890
+ },
4891
+ },
4892
+ })
4893
+ }
4894
+
4895
// testAccContainerNodePool_storagePools returns an HCL config with a
// single-zone cluster and a node pool whose node_config attaches the given
// storage pool. disk_type is hyperdisk-balanced to match the storage pool
// type, and c3-standard-4 is used because hyperdisk requires a
// hyperdisk-capable machine family.
func testAccContainerNodePool_storagePools(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string {
	return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
  name                = "%[1]s"
  location            = "%[6]s"
  initial_node_count  = 1
  deletion_protection = false
  network             = "%[3]s"
  subnetwork          = "%[4]s"
}

resource "google_container_node_pool" "np" {
  name               = "%[2]s"
  location           = "%[6]s"
  cluster            = google_container_cluster.cluster.name
  initial_node_count = 1

  node_config {
    machine_type  = "c3-standard-4"
    image_type    = "COS_CONTAINERD"
    storage_pools = ["%[5]s"]
    disk_type     = "hyperdisk-balanced"
  }
}
`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location)
}
4921
+
4922
+ func TestAccContainerNodePool_withMachineDiskStoragePoolsUpdate (t * testing.T ) {
4923
+ t .Parallel ()
4924
+
4925
+ cluster := fmt .Sprintf ("tf-test-cluster-%s" , acctest .RandString (t , 10 ))
4926
+ nodePool := fmt .Sprintf ("tf-test-nodepool-%s" , acctest .RandString (t , 10 ))
4927
+ networkName := acctest .BootstrapSharedTestNetwork (t , "gke-cluster" )
4928
+ subnetworkName := acctest .BootstrapSubnet (t , "gke-cluster" , networkName )
4929
+ location := envvar .GetTestZoneFromEnv ()
4930
+
4931
+ storagePoolNameURL := acctest .BootstrapComputeStoragePool (t , "basic-1" , "hyperdisk-balanced" )
4932
+ storagePoolResourceName , err := extractSPName (storagePoolNameURL )
4933
+ if err != nil {
4934
+ t .Fatal ("Failed to extract Storage Pool resource name from URL." )
4935
+ }
4936
+ acctest .VcrTest (t , resource.TestCase {
4937
+ PreCheck : func () { acctest .AccTestPreCheck (t ) },
4938
+ ProtoV5ProviderFactories : acctest .ProtoV5ProviderFactories (t ),
4939
+ CheckDestroy : testAccCheckContainerNodePoolDestroyProducer (t ),
4940
+ Steps : []resource.TestStep {
4941
+ {
4942
+ Config : testAccContainerNodePool_basic (cluster , nodePool , networkName , subnetworkName ),
4943
+ },
4944
+ {
4945
+ ResourceName : "google_container_node_pool.np" ,
4946
+ ImportState : true ,
4947
+ ImportStateVerify : true ,
4948
+ },
4949
+ {
4950
+ Config : testAccContainerNodePool_withDiskMachineAndStoragePoolUpdate (cluster , nodePool , networkName , subnetworkName , storagePoolResourceName , location ),
4951
+ Check : resource .ComposeTestCheckFunc (
4952
+ resource .TestCheckResourceAttr ("google_container_node_pool.np" , "node_config.0.storage_pools.0" , storagePoolResourceName ),
4953
+ ),
4954
+ },
4955
+ {
4956
+ ResourceName : "google_container_node_pool.np" ,
4957
+ ImportState : true ,
4958
+ ImportStateVerify : true ,
4959
+ // autoscaling.# = 0 is equivalent to no autoscaling at all,
4960
+ // but will still cause an import diff
4961
+ ImportStateVerifyIgnore : []string {"autoscaling.#" , "node_config.0.taint" , "deletion_protection" },
4962
+ },
4963
+ },
4964
+ })
4965
+ }
4966
+
4967
// testAccContainerNodePool_withDiskMachineAndStoragePoolUpdate returns the
// updated HCL config for the machine/disk/storage-pool update test: same
// cluster and node pool identities as the basic config, but with the node
// pool's node_config switched to c3-standard-4 / hyperdisk-balanced backed by
// the given storage pool.
//
// BUG FIX: the node pool name previously interpolated "%[1]s" (the cluster
// name) instead of "%[2]s" (the np argument, which was otherwise unused).
// Renaming the pool between steps would force a destroy/recreate instead of
// the in-place update this config is meant to exercise.
func testAccContainerNodePool_withDiskMachineAndStoragePoolUpdate(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string {
	return fmt.Sprintf(`
provider "google" {
  alias                 = "user-project-override"
  user_project_override = true
}
resource "google_container_cluster" "cluster" {
  provider            = google.user-project-override
  name                = "%[1]s"
  location            = "%[6]s"
  initial_node_count  = 3
  deletion_protection = false
  network             = "%[3]s"
  subnetwork          = "%[4]s"
}

resource "google_container_node_pool" "np" {
  provider           = google.user-project-override
  name               = "%[2]s"
  location           = "%[6]s"
  cluster            = google_container_cluster.cluster.name
  initial_node_count = 2

  node_config {
    machine_type  = "c3-standard-4"
    disk_size_gb  = 50
    disk_type     = "hyperdisk-balanced"
    storage_pools = ["%[5]s"]
  }
}
`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location)
}
0 commit comments