Skip to content

Commit ae136f7

Browse files
Add support for the storage_pools flag on cluster/node pool create, and node pool update (#11391) (#8146)
[upstream:c63ed44f477c69bcc60f37113fbe0c2eca1be5a0] Signed-off-by: Modular Magician <[email protected]>
1 parent aae2b49 commit ae136f7

File tree

6 files changed

+303
-3
lines changed

6 files changed

+303
-3
lines changed

.changelog/11391.txt

+3
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
```release-note:enhancement
2+
container: added `storage_pools` to `node_config` in `google_container_cluster` and `google_container_node_pool`
3+
```

google-beta/services/container/node_config.go

+19
Original file line numberDiff line numberDiff line change
@@ -452,6 +452,14 @@ func schemaNodeConfig() *schema.Schema {
452452
Description: `The list of instance tags applied to all nodes.`,
453453
},
454454

455+
"storage_pools": {
456+
Type: schema.TypeList,
457+
ForceNew: true,
458+
Optional: true,
459+
Elem: &schema.Schema{Type: schema.TypeString},
460+
Description: `The list of Storage Pools where boot disks are provisioned.`,
461+
},
462+
455463
"shielded_instance_config": {
456464
Type: schema.TypeList,
457465
Optional: true,
@@ -1024,6 +1032,16 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
10241032
nc.Tags = tags
10251033
}
10261034

1035+
if v, ok := nodeConfig["storage_pools"]; ok {
1036+
spList := v.([]interface{})
1037+
storagePools := []string{}
1038+
for _, v := range spList {
1039+
if v != nil {
1040+
storagePools = append(storagePools, v.(string))
1041+
}
1042+
}
1043+
nc.StoragePools = storagePools
1044+
}
10271045
if v, ok := nodeConfig["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 {
10281046
conf := v.([]interface{})[0].(map[string]interface{})
10291047
nc.ShieldedInstanceConfig = &container.ShieldedInstanceConfig{
@@ -1433,6 +1451,7 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]inte
14331451
"tags": c.Tags,
14341452
"preemptible": c.Preemptible,
14351453
"secondary_boot_disks": flattenSecondaryBootDisks(c.SecondaryBootDisks),
1454+
"storage_pools": c.StoragePools,
14361455
"spot": c.Spot,
14371456
"min_cpu_platform": c.MinCpuPlatform,
14381457
"shielded_instance_config": flattenShieldedInstanceConfig(c.ShieldedInstanceConfig),

google-beta/services/container/resource_container_cluster_test.go

+123
Original file line numberDiff line numberDiff line change
@@ -11490,3 +11490,126 @@ resource "google_container_cluster" "primary" {
1149011490
}
1149111491
`, name, networkName, subnetworkName)
1149211492
}
11493+
11494+
func TestAccContainerCluster_storagePoolsWithNodePool(t *testing.T) {
11495+
t.Parallel()
11496+
11497+
cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
11498+
np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10))
11499+
networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
11500+
subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
11501+
location := envvar.GetTestZoneFromEnv()
11502+
11503+
storagePoolNameURL := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-balanced")
11504+
storagePoolResourceName, err := extractSPName(storagePoolNameURL)
11505+
if err != nil {
11506+
t.Fatal("Failed to extract Storage Pool resource name from URL.")
11507+
}
11508+
11509+
acctest.VcrTest(t, resource.TestCase{
11510+
PreCheck: func() { acctest.AccTestPreCheck(t) },
11511+
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
11512+
CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
11513+
Steps: []resource.TestStep{
11514+
{
11515+
Config: testAccContainerCluster_storagePoolsWithNodePool(cluster, location, networkName, subnetworkName, np, storagePoolResourceName),
11516+
Check: resource.ComposeTestCheckFunc(
11517+
resource.TestCheckResourceAttr("google_container_cluster.storage_pools_with_node_pool", "node_pool.0.node_config.0.storage_pools.0", storagePoolResourceName),
11518+
),
11519+
},
11520+
{
11521+
ResourceName: "google_container_cluster.storage_pools_with_node_pool",
11522+
ImportState: true,
11523+
ImportStateVerify: true,
11524+
ImportStateVerifyIgnore: []string{"deletion_protection"},
11525+
},
11526+
},
11527+
})
11528+
}
11529+
11530+
// testAccContainerCluster_storagePoolsWithNodePool returns a cluster config
// with one inline node_pool whose node_config provisions boot disks from the
// given storage pool. Hyperdisk Balanced pools require a compatible machine
// type (c3-standard-4) and disk_type ("hyperdisk-balanced").
func testAccContainerCluster_storagePoolsWithNodePool(cluster, location, networkName, subnetworkName, np, storagePoolResourceName string) string {
	return fmt.Sprintf(`
resource "google_container_cluster" "storage_pools_with_node_pool" {
  name                = "%s"
  location            = "%s"
  deletion_protection = false
  network             = "%s"
  subnetwork          = "%s"
  node_pool {
    name               = "%s"
    initial_node_count = 1
    node_config {
      machine_type  = "c3-standard-4"
      image_type    = "COS_CONTAINERD"
      storage_pools = ["%s"]
      disk_type     = "hyperdisk-balanced"
    }
  }
}
`, cluster, location, networkName, subnetworkName, np, storagePoolResourceName)
}
11551+
11552+
func TestAccContainerCluster_storagePoolsWithNodeConfig(t *testing.T) {
11553+
t.Parallel()
11554+
11555+
cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
11556+
networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
11557+
subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
11558+
location := envvar.GetTestZoneFromEnv()
11559+
11560+
storagePoolNameURL := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-balanced")
11561+
storagePoolResourceName, err := extractSPName(storagePoolNameURL)
11562+
if err != nil {
11563+
t.Fatal("Failed to extract Storage Pool resource name from URL.")
11564+
}
11565+
11566+
acctest.VcrTest(t, resource.TestCase{
11567+
PreCheck: func() { acctest.AccTestPreCheck(t) },
11568+
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
11569+
CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
11570+
Steps: []resource.TestStep{
11571+
{
11572+
Config: testAccContainerCluster_storagePoolsWithNodeConfig(cluster, location, networkName, subnetworkName, storagePoolResourceName),
11573+
Check: resource.ComposeTestCheckFunc(
11574+
resource.TestCheckResourceAttr("google_container_cluster.storage_pools_with_node_config", "node_config.0.storage_pools.0", storagePoolResourceName),
11575+
),
11576+
},
11577+
{
11578+
ResourceName: "google_container_cluster.storage_pools_with_node_config",
11579+
ImportState: true,
11580+
ImportStateVerify: true,
11581+
ImportStateVerifyIgnore: []string{"deletion_protection"},
11582+
},
11583+
},
11584+
})
11585+
}
11586+
11587+
// testAccContainerCluster_storagePoolsWithNodeConfig returns a cluster config
// whose top-level node_config provisions boot disks from the given storage
// pool. Hyperdisk Balanced pools require a compatible machine type
// (c3-standard-4) and disk_type ("hyperdisk-balanced").
func testAccContainerCluster_storagePoolsWithNodeConfig(cluster, location, networkName, subnetworkName, storagePoolResourceName string) string {
	return fmt.Sprintf(`
resource "google_container_cluster" "storage_pools_with_node_config" {
  name                = "%s"
  location            = "%s"
  initial_node_count  = 1
  deletion_protection = false
  network             = "%s"
  subnetwork          = "%s"
  node_config {
    machine_type  = "c3-standard-4"
    image_type    = "COS_CONTAINERD"
    storage_pools = ["%s"]
    disk_type     = "hyperdisk-balanced"
  }
}
`, cluster, location, networkName, subnetworkName, storagePoolResourceName)
}
11605+
11606+
// extractSPName extracts the relative resource name ("projects/...") of a
// Compute storage pool from its fully-qualified beta self-link URL, e.g.
// "https://www.googleapis.com/compute/beta/projects/p/zones/z/storagePools/sp"
// yields "projects/p/zones/z/storagePools/sp". It returns an error, including
// the offending input, when the URL does not match.
func extractSPName(url string) (string, error) {
	re := regexp.MustCompile(`https://www\.googleapis\.com/compute/beta/(projects/[^"]+)`)
	matches := re.FindStringSubmatch(url)
	if len(matches) > 1 {
		return matches[1], nil
	}
	// Report the input so callers' t.Fatal output is actionable.
	return "", fmt.Errorf("no storage pool resource name found in %q", url)
}

google-beta/services/container/resource_container_node_pool.go

+15-3
Original file line numberDiff line numberDiff line change
@@ -1448,13 +1448,25 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
14481448

14491449
if d.HasChange("node_config.0.disk_size_gb") ||
14501450
d.HasChange("node_config.0.disk_type") ||
1451-
d.HasChange("node_config.0.machine_type") {
1451+
d.HasChange("node_config.0.machine_type") ||
1452+
d.HasChange("node_config.0.storage_pools") {
14521453
req := &container.UpdateNodePoolRequest{
14531454
Name: name,
14541455
DiskSizeGb: int64(d.Get("node_config.0.disk_size_gb").(int)),
14551456
DiskType: d.Get("node_config.0.disk_type").(string),
14561457
MachineType: d.Get("node_config.0.machine_type").(string),
14571458
}
1459+
if v, ok := d.GetOk("node_config.0.storage_pools"); ok {
1460+
spList := v.([]interface{})
1461+
storagePools := []string{}
1462+
for _, v := range spList {
1463+
if v != nil {
1464+
storagePools = append(storagePools, v.(string))
1465+
}
1466+
}
1467+
req.StoragePools = storagePools
1468+
}
1469+
14581470
updateF := func() error {
14591471
clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
14601472
if config.UserProjectOverride {
@@ -1469,14 +1481,14 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
14691481
return ContainerOperationWait(config, op,
14701482
nodePoolInfo.project,
14711483
nodePoolInfo.location,
1472-
"updating GKE node pool disk_size_gb/disk_type/machine_type", userAgent,
1484+
"updating GKE node pool disk_size_gb/disk_type/machine_type/storage_pools", userAgent,
14731485
timeout)
14741486
}
14751487

14761488
if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil {
14771489
return err
14781490
}
1479-
log.Printf("[INFO] Updated disk disk_size_gb/disk_type/machine_type for Node Pool %s", d.Id())
1491+
log.Printf("[INFO] Updated disk disk_size_gb/disk_type/machine_type/storage_pools for Node Pool %s", d.Id())
14801492
}
14811493

14821494
if d.HasChange(prefix + "node_config.0.taint") {

google-beta/services/container/resource_container_node_pool_test.go

+141
Original file line numberDiff line numberDiff line change
@@ -4855,3 +4855,144 @@ resource "google_container_node_pool" "np" {
48554855
}
48564856
`, cluster, np)
48574857
}
4858+
4859+
func TestAccContainerNodePool_storagePools(t *testing.T) {
4860+
t.Parallel()
4861+
4862+
cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
4863+
np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10))
4864+
networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
4865+
subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
4866+
location := envvar.GetTestZoneFromEnv()
4867+
4868+
storagePoolNameURL := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-balanced")
4869+
storagePoolResourceName, err := extractSPName(storagePoolNameURL)
4870+
if err != nil {
4871+
t.Fatal("Failed to extract Storage Pool resource name from URL.")
4872+
}
4873+
4874+
acctest.VcrTest(t, resource.TestCase{
4875+
PreCheck: func() { acctest.AccTestPreCheck(t) },
4876+
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
4877+
CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
4878+
Steps: []resource.TestStep{
4879+
{
4880+
Config: testAccContainerNodePool_storagePools(cluster, np, networkName, subnetworkName, storagePoolResourceName, location),
4881+
Check: resource.ComposeTestCheckFunc(
4882+
resource.TestCheckResourceAttr("google_container_node_pool.np", "node_config.0.storage_pools.0", storagePoolResourceName),
4883+
),
4884+
},
4885+
{
4886+
ResourceName: "google_container_node_pool.np",
4887+
ImportState: true,
4888+
ImportStateVerify: true,
4889+
ImportStateVerifyIgnore: []string{"deletion_protection"},
4890+
},
4891+
},
4892+
})
4893+
}
4894+
4895+
// testAccContainerNodePool_storagePools returns a minimal cluster plus a
// standalone node pool whose node_config provisions boot disks from the given
// storage pool. Hyperdisk Balanced pools require a compatible machine type
// (c3-standard-4) and disk_type ("hyperdisk-balanced").
func testAccContainerNodePool_storagePools(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string {
	return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
  name                = "%[1]s"
  location            = "%[6]s"
  initial_node_count  = 1
  deletion_protection = false
  network             = "%[3]s"
  subnetwork          = "%[4]s"
}

resource "google_container_node_pool" "np" {
  name               = "%[2]s"
  location           = "%[6]s"
  cluster            = google_container_cluster.cluster.name
  initial_node_count = 1

  node_config {
    machine_type  = "c3-standard-4"
    image_type    = "COS_CONTAINERD"
    storage_pools = ["%[5]s"]
    disk_type     = "hyperdisk-balanced"
  }
}
`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location)
}
4921+
4922+
func TestAccContainerNodePool_withMachineDiskStoragePoolsUpdate(t *testing.T) {
4923+
t.Parallel()
4924+
4925+
cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
4926+
nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10))
4927+
networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
4928+
subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
4929+
location := envvar.GetTestZoneFromEnv()
4930+
4931+
storagePoolNameURL := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-balanced")
4932+
storagePoolResourceName, err := extractSPName(storagePoolNameURL)
4933+
if err != nil {
4934+
t.Fatal("Failed to extract Storage Pool resource name from URL.")
4935+
}
4936+
acctest.VcrTest(t, resource.TestCase{
4937+
PreCheck: func() { acctest.AccTestPreCheck(t) },
4938+
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
4939+
CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
4940+
Steps: []resource.TestStep{
4941+
{
4942+
Config: testAccContainerNodePool_basic(cluster, nodePool, networkName, subnetworkName),
4943+
},
4944+
{
4945+
ResourceName: "google_container_node_pool.np",
4946+
ImportState: true,
4947+
ImportStateVerify: true,
4948+
},
4949+
{
4950+
Config: testAccContainerNodePool_withDiskMachineAndStoragePoolUpdate(cluster, nodePool, networkName, subnetworkName, storagePoolResourceName, location),
4951+
Check: resource.ComposeTestCheckFunc(
4952+
resource.TestCheckResourceAttr("google_container_node_pool.np", "node_config.0.storage_pools.0", storagePoolResourceName),
4953+
),
4954+
},
4955+
{
4956+
ResourceName: "google_container_node_pool.np",
4957+
ImportState: true,
4958+
ImportStateVerify: true,
4959+
// autoscaling.# = 0 is equivalent to no autoscaling at all,
4960+
// but will still cause an import diff
4961+
ImportStateVerifyIgnore: []string{"autoscaling.#", "node_config.0.taint", "deletion_protection"},
4962+
},
4963+
},
4964+
})
4965+
}
4966+
4967+
func testAccContainerNodePool_withDiskMachineAndStoragePoolUpdate(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string {
4968+
return fmt.Sprintf(`
4969+
provider "google" {
4970+
alias = "user-project-override"
4971+
user_project_override = true
4972+
}
4973+
resource "google_container_cluster" "cluster" {
4974+
provider = google.user-project-override
4975+
name = "%[1]s"
4976+
location = "%[6]s"
4977+
initial_node_count = 3
4978+
deletion_protection = false
4979+
network = "%[3]s"
4980+
subnetwork = "%[4]s"
4981+
}
4982+
4983+
resource "google_container_node_pool" "np" {
4984+
provider = google.user-project-override
4985+
name = "%[1]s"
4986+
location = "%[6]s"
4987+
cluster = google_container_cluster.cluster.name
4988+
initial_node_count = 2
4989+
4990+
node_config {
4991+
machine_type = "c3-standard-4"
4992+
disk_size_gb = 50
4993+
disk_type = "hyperdisk-balanced"
4994+
storage_pools = ["%[5]s"]
4995+
}
4996+
}
4997+
`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location)
4998+
}

website/docs/r/container_cluster.html.markdown

+2
Original file line numberDiff line numberDiff line change
@@ -941,6 +941,8 @@ gvnic {
941941

942942
* `shielded_instance_config` - (Optional) Shielded Instance options. Structure is [documented below](#nested_shielded_instance_config).
943943

944+
* `storage_pools` - (Optional) The list of Storage Pools where boot disks are provisioned.
945+
944946
* `tags` - (Optional) The list of instance tags applied to all nodes. Tags are used to identify
945947
valid sources or targets for network firewalls.
946948

0 commit comments

Comments
 (0)