@@ -274,22 +274,22 @@ locals {
 # resources where "ForceNew" is "true". schemaNodeConfig can be found in node_config.go at
 # https://github.com/terraform-providers/terraform-provider-google/blob/master/google/node_config.go#L22
 resource "random_id" "name" {
-  count       = length(var.node_pools)
+  for_each    = local.node_pools
   byte_length = 2
-  prefix      = format("%s-", lookup(var.node_pools[count.index], "name"))
+  prefix      = format("%s-", lookup(each.value, "name"))
   keepers = merge(
     zipmap(
       local.force_node_pool_recreation_resources,
-      [for keeper in local.force_node_pool_recreation_resources : lookup(var.node_pools[count.index], keeper, "")]
+      [for keeper in local.force_node_pool_recreation_resources : lookup(each.value, keeper, "")]
     ),
     {
       labels = join(",",
         sort(
           concat(
             keys(local.node_pools_labels["all"]),
             values(local.node_pools_labels["all"]),
-            keys(local.node_pools_labels[var.node_pools[count.index]["name"]]),
-            values(local.node_pools_labels[var.node_pools[count.index]["name"]])
+            keys(local.node_pools_labels[each.value["name"]]),
+            values(local.node_pools_labels[each.value["name"]])
           )
         )
       )
@@ -300,8 +300,8 @@ resource "random_id" "name" {
           concat(
             keys(local.node_pools_metadata["all"]),
             values(local.node_pools_metadata["all"]),
-            keys(local.node_pools_metadata[var.node_pools[count.index]["name"]]),
-            values(local.node_pools_metadata[var.node_pools[count.index]["name"]])
+            keys(local.node_pools_metadata[each.value["name"]]),
+            values(local.node_pools_metadata[each.value["name"]])
           )
         )
       )
@@ -311,7 +311,7 @@ resource "random_id" "name" {
         sort(
           concat(
             local.node_pools_oauth_scopes["all"],
-            local.node_pools_oauth_scopes[var.node_pools[count.index]["name"]]
+            local.node_pools_oauth_scopes[each.value["name"]]
           )
         )
       )
@@ -321,7 +321,7 @@ resource "random_id" "name" {
         sort(
           concat(
             local.node_pools_tags["all"],
-            local.node_pools_tags[var.node_pools[count.index]["name"]]
+            local.node_pools_tags[each.value["name"]]
           )
         )
       )
@@ -336,66 +336,66 @@ resource "google_container_node_pool" "pools" {
 {% else %}
   provider = google
 {% endif %}
-  count    = length(var.node_pools)
+  for_each = local.node_pools
 {% if update_variant %}
-  name = random_id.name.*.hex[count.index]
+  name = random_id.name[each.key].hex
 {% else %}
-  name = var.node_pools[count.index]["name"]
+  name = each.key
 {% endif %}
   project  = var.project_id
   location = local.location
 {% if beta_cluster %}
   // use node_locations if provided, defaults to cluster level node_locations if not specified
-  node_locations = lookup(var.node_pools[count.index], "node_locations", "") != "" ? split(",", var.node_pools[count.index]["node_locations"]) : null
+  node_locations = lookup(each.value, "node_locations", "") != "" ? split(",", each.value["node_locations"]) : null
 {% endif %}

   cluster = google_container_cluster.primary.name

-  version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(
-    var.node_pools[count.index],
+  version = lookup(each.value, "auto_upgrade", false) ? "" : lookup(
+    each.value,
     "version",
     local.node_version,
   )

-  initial_node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? lookup(
-    var.node_pools[count.index],
+  initial_node_count = lookup(each.value, "autoscaling", true) ? lookup(
+    each.value,
     "initial_node_count",
-    lookup(var.node_pools[count.index], "min_count", 1)
+    lookup(each.value, "min_count", 1)
   ) : null

 {% if beta_cluster %}
-  max_pods_per_node = lookup(var.node_pools[count.index], "max_pods_per_node", null)
+  max_pods_per_node = lookup(each.value, "max_pods_per_node", null)
 {% endif %}

-  node_count = lookup(var.node_pools[count.index], "autoscaling", true) ? null : lookup(var.node_pools[count.index], "node_count", 1)
+  node_count = lookup(each.value, "autoscaling", true) ? null : lookup(each.value, "node_count", 1)

   dynamic "autoscaling" {
-    for_each = lookup(var.node_pools[count.index], "autoscaling", true) ? [var.node_pools[count.index]] : []
+    for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
       min_node_count = lookup(autoscaling.value, "min_count", 1)
       max_node_count = lookup(autoscaling.value, "max_count", 100)
     }
   }

   management {
-    auto_repair  = lookup(var.node_pools[count.index], "auto_repair", true)
-    auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", local.default_auto_upgrade)
+    auto_repair  = lookup(each.value, "auto_repair", true)
+    auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade)
   }

   node_config {
-    image_type   = lookup(var.node_pools[count.index], "image_type", "COS")
-    machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")
+    image_type   = lookup(each.value, "image_type", "COS")
+    machine_type = lookup(each.value, "machine_type", "n1-standard-2")
     labels = merge(
       lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {},
-      lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {},
+      lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {},
       local.node_pools_labels["all"],
-      local.node_pools_labels[var.node_pools[count.index]["name"]],
+      local.node_pools_labels[each.value["name"]],
     )
     metadata = merge(
       lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {},
-      lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = var.node_pools[count.index]["name"] } : {},
+      lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {},
       local.node_pools_metadata["all"],
-      local.node_pools_metadata[var.node_pools[count.index]["name"]],
+      local.node_pools_metadata[each.value["name"]],
       {
         "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints
       },
@@ -404,7 +404,7 @@ resource "google_container_node_pool" "pools" {
     dynamic "taint" {
       for_each = concat(
         local.node_pools_taints["all"],
-        local.node_pools_taints[var.node_pools[count.index]["name"]],
+        local.node_pools_taints[each.value["name"]],
       )
       content {
         effect = taint.value.effect
@@ -415,31 +415,31 @@ resource "google_container_node_pool" "pools" {
 {% endif %}
     tags = concat(
       lookup(local.node_pools_tags, "default_values", [true, true])[0] ? ["gke-${var.name}"] : [],
-      lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${var.node_pools[count.index]["name"]}"] : [],
+      lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["gke-${var.name}-${each.value["name"]}"] : [],
       local.node_pools_tags["all"],
-      local.node_pools_tags[var.node_pools[count.index]["name"]],
+      local.node_pools_tags[each.value["name"]],
     )

-    local_ssd_count = lookup(var.node_pools[count.index], "local_ssd_count", 0)
-    disk_size_gb    = lookup(var.node_pools[count.index], "disk_size_gb", 100)
-    disk_type       = lookup(var.node_pools[count.index], "disk_type", "pd-standard")
+    local_ssd_count = lookup(each.value, "local_ssd_count", 0)
+    disk_size_gb    = lookup(each.value, "disk_size_gb", 100)
+    disk_type       = lookup(each.value, "disk_type", "pd-standard")

     service_account = lookup(
-      var.node_pools[count.index],
+      each.value,
       "service_account",
       local.service_account,
     )
-    preemptible = lookup(var.node_pools[count.index], "preemptible", false)
+    preemptible = lookup(each.value, "preemptible", false)

     oauth_scopes = concat(
       local.node_pools_oauth_scopes["all"],
-      local.node_pools_oauth_scopes[var.node_pools[count.index]["name"]],
+      local.node_pools_oauth_scopes[each.value["name"]],
     )

     guest_accelerator = [
-      for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{
-        type  = lookup(var.node_pools[count.index], "accelerator_type", "")
-        count = lookup(var.node_pools[count.index], "accelerator_count", 0)
+      for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
+        type  = lookup(each.value, "accelerator_type", "")
+        count = lookup(each.value, "accelerator_count", 0)
       }] : [] : {
         type  = guest_accelerator["type"]
         count = guest_accelerator["count"]
@@ -451,7 +451,7 @@ resource "google_container_node_pool" "pools" {
       for_each = local.cluster_node_metadata_config

       content {
-        node_metadata = lookup(var.node_pools[count.index], "node_metadata", workload_metadata_config.value.node_metadata)
+        node_metadata = lookup(each.value, "node_metadata", workload_metadata_config.value.node_metadata)
       }
     }
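A note on the keying assumption behind this change: for_each = local.node_pools and name = each.key only line up if local.node_pools is a map of pool definitions keyed by pool name. That local is not shown in this hunk; a minimal sketch of how it could be derived from the list-shaped var.node_pools (hypothetical, the module's actual expression may differ):

locals {
  # Re-key the list of node pool maps by their "name" attribute so that
  # each.key is the pool name and each.value is the full pool definition.
  node_pools = { for np in var.node_pools : np["name"] => np }
}

Keying by name rather than list position is the point of the migration: inserting or removing a pool no longer shifts the indices of the pools behind it, so Terraform stops planning destroy/create churn for pools that did not change. Existing deployments still need a one-time state address move, e.g. terraform state mv 'google_container_node_pool.pools[0]' 'google_container_node_pool.pools["default-pool"]' (pool name hypothetical).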
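For context on the keepers block in random_id.name: the random provider regenerates the id whenever any keeper value changes, which is how the update variant forces a freshly named pool (and therefore a replacement) when one of the ForceNew fields listed in local.force_node_pool_recreation_resources changes. A stripped-down illustration with hypothetical values:

resource "random_id" "example" {
  byte_length = 2
  prefix      = "pool-"

  # Changing any keeper value (say, disk_size_gb from "100" to "200")
  # yields a new hex suffix, replacing everything named after this id.
  keepers = {
    machine_type = "n1-standard-2"
    disk_size_gb = "100"
  }
}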