Skip to content

Commit 0304a20

Browse files
authored
fix: use dynamic block for accelerators, updates for CI (#1428)
1 parent c81c2fe commit 0304a20

File tree

22 files changed

+80
-185
lines changed

22 files changed

+80
-185
lines changed

autogen/main/cluster.tf.tmpl

+5-8
Original file line number | Diff line number | Diff line change
@@ -722,17 +722,14 @@ resource "google_container_node_pool" "windows_pools" {
722722
local.node_pools_oauth_scopes[each.value["name"]],
723723
)
724724

725-
guest_accelerator = [
726-
for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
725+
dynamic "guest_accelerator" {
726+
for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
727+
content {
727728
type = lookup(each.value, "accelerator_type", "")
728729
count = lookup(each.value, "accelerator_count", 0)
729-
gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
730-
}] : [] : {
731-
type = guest_accelerator["type"]
732-
count = guest_accelerator["count"]
733-
gpu_partition_size = guest_accelerator["gpu_partition_size"]
730+
gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
734731
}
735-
]
732+
}
736733

737734
dynamic "workload_metadata_config" {
738735
for_each = local.cluster_node_metadata_config

build/int.cloudbuild.yaml

+16-15
Original file line number | Diff line number | Diff line change
@@ -131,21 +131,22 @@ steps:
131131
- verify simple-regional-with-networking-local
132132
name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
133133
args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-with-networking-local']
134-
- id: converge simple-zonal-local
135-
waitFor:
136-
- create all
137-
name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
138-
args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-zonal-local']
139-
- id: verify simple-zonal-local
140-
waitFor:
141-
- converge simple-zonal-local
142-
name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
143-
args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-zonal-local']
144-
- id: destroy simple-zonal-local
145-
waitFor:
146-
- verify simple-zonal-local
147-
name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
148-
args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-zonal-local']
134+
# TODO(bharathkkb): https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/1431
135+
# - id: converge simple-zonal-local
136+
# waitFor:
137+
# - create all
138+
# name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
139+
# args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-zonal-local']
140+
# - id: verify simple-zonal-local
141+
# waitFor:
142+
# - converge simple-zonal-local
143+
# name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
144+
# args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-zonal-local']
145+
# - id: destroy simple-zonal-local
146+
# waitFor:
147+
# - verify simple-zonal-local
148+
# name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
149+
# args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-zonal-local']
149150
- id: converge simple-zonal-private-local
150151
waitFor:
151152
- create all

cluster.tf

+8-14
Original file line number | Diff line number | Diff line change
@@ -407,17 +407,14 @@ resource "google_container_node_pool" "pools" {
407407
local.node_pools_oauth_scopes[each.value["name"]],
408408
)
409409

410-
guest_accelerator = [
411-
for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
410+
dynamic "guest_accelerator" {
411+
for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
412+
content {
412413
type = lookup(each.value, "accelerator_type", "")
413414
count = lookup(each.value, "accelerator_count", 0)
414415
gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
415-
}] : [] : {
416-
type = guest_accelerator["type"]
417-
count = guest_accelerator["count"]
418-
gpu_partition_size = guest_accelerator["gpu_partition_size"]
419416
}
420-
]
417+
}
421418

422419
dynamic "workload_metadata_config" {
423420
for_each = local.cluster_node_metadata_config
@@ -560,17 +557,14 @@ resource "google_container_node_pool" "windows_pools" {
560557
local.node_pools_oauth_scopes[each.value["name"]],
561558
)
562559

563-
guest_accelerator = [
564-
for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
560+
dynamic "guest_accelerator" {
561+
for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
562+
content {
565563
type = lookup(each.value, "accelerator_type", "")
566564
count = lookup(each.value, "accelerator_count", 0)
567565
gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
568-
}] : [] : {
569-
type = guest_accelerator["type"]
570-
count = guest_accelerator["count"]
571-
gpu_partition_size = guest_accelerator["gpu_partition_size"]
572566
}
573-
]
567+
}
574568

575569
dynamic "workload_metadata_config" {
576570
for_each = local.cluster_node_metadata_config

examples/node_pool/data/shutdown-script.sh

-18
This file was deleted.

examples/node_pool/main.tf

+2-2
Original file line number | Diff line number | Diff line change
@@ -37,7 +37,7 @@ module "gke" {
3737
ip_range_pods = var.ip_range_pods
3838
ip_range_services = var.ip_range_services
3939
create_service_account = false
40-
remove_default_node_pool = true
40+
remove_default_node_pool = false
4141
disable_legacy_metadata_endpoints = false
4242
cluster_autoscaling = var.cluster_autoscaling
4343

@@ -81,7 +81,7 @@ module "gke" {
8181

8282
node_pools_metadata = {
8383
pool-01 = {
84-
shutdown-script = file("${path.module}/data/shutdown-script.sh")
84+
shutdown-script = "kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data \"$HOSTNAME\""
8585
}
8686
}
8787

examples/safer_cluster/main.tf

-3
Original file line number | Diff line number | Diff line change
@@ -73,9 +73,6 @@ module "gke" {
7373
},
7474
]
7575

76-
istio = true
77-
cloudrun = true
78-
7976
notification_config_topic = google_pubsub_topic.updates.id
8077
}
8178

examples/simple_regional_beta/README.md

-2
Original file line number | Diff line number | Diff line change
@@ -7,7 +7,6 @@ This example illustrates how to create a simple cluster with beta features.
77

88
| Name | Description | Type | Default | Required |
99
|------|-------------|------|---------|:--------:|
10-
| cloudrun | Boolean to enable / disable CloudRun | `bool` | `true` | no |
1110
| cluster\_name\_suffix | A suffix to append to the default cluster name | `string` | `""` | no |
1211
| compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | `any` | n/a | yes |
1312
| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key\_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key\_name is the name of a CloudKMS key. | `list(object({ state = string, key_name = string }))` | <pre>[<br> {<br> "key_name": "",<br> "state": "DECRYPTED"<br> }<br>]</pre> | no |
@@ -18,7 +17,6 @@ This example illustrates how to create a simple cluster with beta features.
1817
| gce\_pd\_csi\_driver | (Beta) Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. | `bool` | `false` | no |
1918
| ip\_range\_pods | The secondary ip range to use for pods | `any` | n/a | yes |
2019
| ip\_range\_services | The secondary ip range to use for services | `any` | n/a | yes |
21-
| istio | Boolean to enable / disable Istio | `bool` | `true` | no |
2220
| network | The VPC network to host the cluster in | `any` | n/a | yes |
2321
| node\_pools | List of maps containing node pools | `list(map(string))` | <pre>[<br> {<br> "name": "default-node-pool"<br> }<br>]</pre> | no |
2422
| project\_id | The project ID to host the cluster in | `any` | n/a | yes |

examples/simple_regional_beta/main.tf

-2
Original file line number | Diff line number | Diff line change
@@ -39,8 +39,6 @@ module "gke" {
3939
ip_range_services = var.ip_range_services
4040
create_service_account = var.compute_engine_service_account == "create"
4141
service_account = var.compute_engine_service_account
42-
istio = var.istio
43-
cloudrun = var.cloudrun
4442
dns_cache = var.dns_cache
4543
gce_pd_csi_driver = var.gce_pd_csi_driver
4644
sandbox_enabled = var.sandbox_enabled

examples/simple_regional_beta/variables.tf

-10
Original file line number | Diff line number | Diff line change
@@ -47,16 +47,6 @@ variable "compute_engine_service_account" {
4747
description = "Service account to associate to the nodes in the cluster"
4848
}
4949

50-
variable "istio" {
51-
description = "Boolean to enable / disable Istio"
52-
default = true
53-
}
54-
55-
variable "cloudrun" {
56-
description = "Boolean to enable / disable CloudRun"
57-
default = true
58-
}
59-
6050
variable "dns_cache" {
6151
type = bool
6252
description = "(Beta) The status of the NodeLocal DNSCache addon."

examples/simple_regional_private_beta/main.tf

-2
Original file line number | Diff line number | Diff line change
@@ -56,8 +56,6 @@ module "gke" {
5656

5757
enable_confidential_nodes = true
5858

59-
istio = var.istio
60-
cloudrun = var.cloudrun
6159
dns_cache = var.dns_cache
6260
gce_pd_csi_driver = var.gce_pd_csi_driver
6361
}

examples/simple_regional_private_beta/variables.tf

-10
Original file line number | Diff line number | Diff line change
@@ -47,16 +47,6 @@ variable "compute_engine_service_account" {
4747
description = "Service account to associate to the nodes in the cluster"
4848
}
4949

50-
variable "istio" {
51-
description = "Boolean to enable / disable Istio"
52-
default = true
53-
}
54-
55-
variable "cloudrun" {
56-
description = "Boolean to enable / disable CloudRun"
57-
default = true
58-
}
59-
6050
variable "dns_cache" {
6151
description = "Boolean to enable / disable NodeLocal DNSCache "
6252
default = false

modules/beta-private-cluster-update-variant/cluster.tf

+8-14
Original file line number | Diff line number | Diff line change
@@ -638,17 +638,14 @@ resource "google_container_node_pool" "pools" {
638638
local.node_pools_oauth_scopes[each.value["name"]],
639639
)
640640

641-
guest_accelerator = [
642-
for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
641+
dynamic "guest_accelerator" {
642+
for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
643+
content {
643644
type = lookup(each.value, "accelerator_type", "")
644645
count = lookup(each.value, "accelerator_count", 0)
645646
gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
646-
}] : [] : {
647-
type = guest_accelerator["type"]
648-
count = guest_accelerator["count"]
649-
gpu_partition_size = guest_accelerator["gpu_partition_size"]
650647
}
651-
]
648+
}
652649

653650
dynamic "workload_metadata_config" {
654651
for_each = local.cluster_node_metadata_config
@@ -846,17 +843,14 @@ resource "google_container_node_pool" "windows_pools" {
846843
local.node_pools_oauth_scopes[each.value["name"]],
847844
)
848845

849-
guest_accelerator = [
850-
for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
846+
dynamic "guest_accelerator" {
847+
for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
848+
content {
851849
type = lookup(each.value, "accelerator_type", "")
852850
count = lookup(each.value, "accelerator_count", 0)
853851
gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
854-
}] : [] : {
855-
type = guest_accelerator["type"]
856-
count = guest_accelerator["count"]
857-
gpu_partition_size = guest_accelerator["gpu_partition_size"]
858852
}
859-
]
853+
}
860854

861855
dynamic "workload_metadata_config" {
862856
for_each = local.cluster_node_metadata_config

modules/beta-private-cluster/cluster.tf

+8-14
Original file line number | Diff line number | Diff line change
@@ -547,17 +547,14 @@ resource "google_container_node_pool" "pools" {
547547
local.node_pools_oauth_scopes[each.value["name"]],
548548
)
549549

550-
guest_accelerator = [
551-
for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
550+
dynamic "guest_accelerator" {
551+
for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
552+
content {
552553
type = lookup(each.value, "accelerator_type", "")
553554
count = lookup(each.value, "accelerator_count", 0)
554555
gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
555-
}] : [] : {
556-
type = guest_accelerator["type"]
557-
count = guest_accelerator["count"]
558-
gpu_partition_size = guest_accelerator["gpu_partition_size"]
559556
}
560-
]
557+
}
561558

562559
dynamic "workload_metadata_config" {
563560
for_each = local.cluster_node_metadata_config
@@ -754,17 +751,14 @@ resource "google_container_node_pool" "windows_pools" {
754751
local.node_pools_oauth_scopes[each.value["name"]],
755752
)
756753

757-
guest_accelerator = [
758-
for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
754+
dynamic "guest_accelerator" {
755+
for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
756+
content {
759757
type = lookup(each.value, "accelerator_type", "")
760758
count = lookup(each.value, "accelerator_count", 0)
761759
gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
762-
}] : [] : {
763-
type = guest_accelerator["type"]
764-
count = guest_accelerator["count"]
765-
gpu_partition_size = guest_accelerator["gpu_partition_size"]
766760
}
767-
]
761+
}
768762

769763
dynamic "workload_metadata_config" {
770764
for_each = local.cluster_node_metadata_config

modules/beta-public-cluster-update-variant/cluster.tf

+8-14
Original file line number | Diff line number | Diff line change
@@ -619,17 +619,14 @@ resource "google_container_node_pool" "pools" {
619619
local.node_pools_oauth_scopes[each.value["name"]],
620620
)
621621

622-
guest_accelerator = [
623-
for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
622+
dynamic "guest_accelerator" {
623+
for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
624+
content {
624625
type = lookup(each.value, "accelerator_type", "")
625626
count = lookup(each.value, "accelerator_count", 0)
626627
gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
627-
}] : [] : {
628-
type = guest_accelerator["type"]
629-
count = guest_accelerator["count"]
630-
gpu_partition_size = guest_accelerator["gpu_partition_size"]
631628
}
632-
]
629+
}
633630

634631
dynamic "workload_metadata_config" {
635632
for_each = local.cluster_node_metadata_config
@@ -827,17 +824,14 @@ resource "google_container_node_pool" "windows_pools" {
827824
local.node_pools_oauth_scopes[each.value["name"]],
828825
)
829826

830-
guest_accelerator = [
831-
for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
827+
dynamic "guest_accelerator" {
828+
for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
829+
content {
832830
type = lookup(each.value, "accelerator_type", "")
833831
count = lookup(each.value, "accelerator_count", 0)
834832
gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
835-
}] : [] : {
836-
type = guest_accelerator["type"]
837-
count = guest_accelerator["count"]
838-
gpu_partition_size = guest_accelerator["gpu_partition_size"]
839833
}
840-
]
834+
}
841835

842836
dynamic "workload_metadata_config" {
843837
for_each = local.cluster_node_metadata_config

modules/beta-public-cluster/cluster.tf

+8-14
Original file line number | Diff line number | Diff line change
@@ -528,17 +528,14 @@ resource "google_container_node_pool" "pools" {
528528
local.node_pools_oauth_scopes[each.value["name"]],
529529
)
530530

531-
guest_accelerator = [
532-
for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
531+
dynamic "guest_accelerator" {
532+
for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
533+
content {
533534
type = lookup(each.value, "accelerator_type", "")
534535
count = lookup(each.value, "accelerator_count", 0)
535536
gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
536-
}] : [] : {
537-
type = guest_accelerator["type"]
538-
count = guest_accelerator["count"]
539-
gpu_partition_size = guest_accelerator["gpu_partition_size"]
540537
}
541-
]
538+
}
542539

543540
dynamic "workload_metadata_config" {
544541
for_each = local.cluster_node_metadata_config
@@ -735,17 +732,14 @@ resource "google_container_node_pool" "windows_pools" {
735732
local.node_pools_oauth_scopes[each.value["name"]],
736733
)
737734

738-
guest_accelerator = [
739-
for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
735+
dynamic "guest_accelerator" {
736+
for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
737+
content {
740738
type = lookup(each.value, "accelerator_type", "")
741739
count = lookup(each.value, "accelerator_count", 0)
742740
gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
743-
}] : [] : {
744-
type = guest_accelerator["type"]
745-
count = guest_accelerator["count"]
746-
gpu_partition_size = guest_accelerator["gpu_partition_size"]
747741
}
748-
]
742+
}
749743

750744
dynamic "workload_metadata_config" {
751745
for_each = local.cluster_node_metadata_config

0 commit comments

Comments (0)