Commit eca88a2

Add a logging_variant field to GKE node pools and to node pool defaults for GKE clusters. (#6744) (#13049)
This PR implements the feature request from [Add GKE logging variant field for increasing log agent throughput #12667](#12667). By adding a `logging_variant` field within `node_pool_defaults`, GKE users can select a cluster-wide default value for the logging agent of the node pools in a cluster. For example, with

```terraform
resource "google_container_cluster" "with_logging_variant_node_pool_default" {
  name               = "example-cluster"
  location           = "us-central1-f"
  initial_node_count = 1

  node_pool_defaults {
    node_config_defaults {
      logging_variant = "MAX_THROUGHPUT"
    }
  }
}
```

every newly created node pool in the cluster will use the max throughput logging agent unless this is explicitly overridden at the node pool level (see the [GKE docs](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#high_throughput_for_all_nodes_in_a_cluster) for more information).

GKE users can also select a logging variant at the node pool level. For example, with

```terraform
resource "google_container_cluster" "with_logging_variant_node_pool_default" {
  name               = "example-cluster"
  location           = "us-central1-f"
  initial_node_count = 1

  node_pool_defaults {
    node_config_defaults {
      logging_variant = "DEFAULT"
    }
  }
}

resource "google_container_node_pool" "with_default_logging_variant" {
  name    = "example-node-pool-0"
  cluster = google_container_cluster.with_logging_variant_node_pool_default.name
}

resource "google_container_node_pool" "with_high_throughput_logging_variant" {
  name    = "example-node-pool-1"
  cluster = google_container_cluster.with_logging_variant_node_pool_default.name

  node_config {
    logging_variant = "MAX_THROUGHPUT"
  }
}
```

example-node-pool-0 (as well as the default node pool) will use the default logging agent (see the [GKE docs](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#high_throughput_for_all_nodes_in_a_cluster) for more information), but example-node-pool-1 will use the max throughput agent.

Signed-off-by: Modular Magician <[email protected]>
1 parent 7c98257 commit eca88a2

7 files changed (+382 −3 lines)

.changelog/6744.txt (+6)

````diff
@@ -0,0 +1,6 @@
+```release-note:enhancement
+container: Added `node_pool_defaults.node_config_defaults.logging_variant`, `node_pool.node_config.logging_variant`, and `node_config.logging_variant` to `google_container_cluster`.
+```
+```release-note:enhancement
+container: Added `node_config.logging_variant` to `google_container_node_pool`.
+```
````

google/node_config.go (+61)

```diff
@@ -16,6 +16,16 @@ var defaultOauthScopes = []string{
 	"https://www.googleapis.com/auth/trace.append",
 }
 
+func schemaLoggingVariant() *schema.Schema {
+	return &schema.Schema{
+		Type:         schema.TypeString,
+		Optional:     true,
+		Description:  `Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.`,
+		Default:      "DEFAULT",
+		ValidateFunc: validation.StringInSlice([]string{"DEFAULT", "MAX_THROUGHPUT"}, false),
+	}
+}
+
 func schemaGcfsConfig(forceNew bool) *schema.Schema {
 	return &schema.Schema{
 		Type: schema.TypeList,
@@ -148,6 +158,8 @@ func schemaNodeConfig() *schema.Schema {
 				Description: `The number of local SSD disks to be attached to the node.`,
 			},
 
+			"logging_variant": schemaLoggingVariant(),
+
 			"gcfs_config": schemaGcfsConfig(true),
 
 			"gvnic": {
@@ -369,6 +381,24 @@ func schemaNodeConfig() *schema.Schema {
 		}
 	}
 
+func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefaults {
+	configs := configured.([]interface{})
+	if len(configs) == 0 || configs[0] == nil {
+		return nil
+	}
+	config := configs[0].(map[string]interface{})
+
+	nodeConfigDefaults := &container.NodeConfigDefaults{}
+	if variant, ok := config["logging_variant"]; ok {
+		nodeConfigDefaults.LoggingConfig = &container.NodePoolLoggingConfig{
+			VariantConfig: &container.LoggingVariantConfig{
+				Variant: variant.(string),
+			},
+		}
+	}
+	return nodeConfigDefaults
+}
+
 func expandNodeConfig(v interface{}) *container.NodeConfig {
 	nodeConfigs := v.([]interface{})
 	nc := &container.NodeConfig{
@@ -424,6 +454,14 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
 		nc.LocalSsdCount = int64(v.(int))
 	}
 
+	if v, ok := nodeConfig["logging_variant"]; ok {
+		nc.LoggingConfig = &container.NodePoolLoggingConfig{
+			VariantConfig: &container.LoggingVariantConfig{
+				Variant: v.(string),
+			},
+		}
+	}
+
 	if v, ok := nodeConfig["gcfs_config"]; ok && len(v.([]interface{})) > 0 {
 		conf := v.([]interface{})[0].(map[string]interface{})
 		nc.GcfsConfig = &container.GcfsConfig{
@@ -565,6 +603,20 @@ func expandWorkloadMetadataConfig(v interface{}) *container.WorkloadMetadataConfig {
 	return wmc
 }
 
+func flattenNodeConfigDefaults(c *container.NodeConfigDefaults) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0, 1)
+
+	if c == nil {
+		return result
+	}
+
+	result = append(result, map[string]interface{}{})
+
+	result[0]["logging_variant"] = flattenLoggingVariant(c.LoggingConfig)
+
+	return result
+}
+
 func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} {
 	config := make([]map[string]interface{}, 0, 1)
 
@@ -578,6 +630,7 @@ func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} {
 		"disk_type":            c.DiskType,
 		"guest_accelerator":    flattenContainerGuestAccelerators(c.Accelerators),
 		"local_ssd_count":      c.LocalSsdCount,
+		"logging_variant":      flattenLoggingVariant(c.LoggingConfig),
 		"gcfs_config":          flattenGcfsConfig(c.GcfsConfig),
 		"gvnic":                flattenGvnic(c.Gvnic),
 		"reservation_affinity": flattenGKEReservationAffinity(c.ReservationAffinity),
@@ -635,6 +688,14 @@ func flattenShieldedInstanceConfig(c *container.ShieldedInstanceConfig) []map[string]interface{} {
 	return result
 }
 
+func flattenLoggingVariant(c *container.NodePoolLoggingConfig) string {
+	variant := "DEFAULT"
+	if c != nil && c.VariantConfig != nil && c.VariantConfig.Variant != "" {
+		variant = c.VariantConfig.Variant
+	}
+	return variant
+}
+
 func flattenGcfsConfig(c *container.GcfsConfig) []map[string]interface{} {
 	result := []map[string]interface{}{}
 	if c != nil {
```
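The `schemaLoggingVariant()` schema above defaults to `"DEFAULT"` and validates against `DEFAULT`/`MAX_THROUGHPUT`, and `flattenLoggingVariant` maps an absent API value back to `"DEFAULT"`, so omitting the field and setting it explicitly produce the same plan. As a minimal sketch of the cluster-level `node_config` usage this schema enables (resource and cluster names are illustrative; this mirrors the `testAccContainerCluster_withLoggingVariantInNodeConfig` acceptance-test config further down):

```terraform
resource "google_container_cluster" "with_logging_variant_in_node_config" {
  name               = "example-cluster"
  location           = "us-central1-f"
  initial_node_count = 1

  node_config {
    # Defaults to "DEFAULT" when omitted; "MAX_THROUGHPUT" opts the default
    # node pool into the high-throughput logging agent.
    logging_variant = "MAX_THROUGHPUT"
  }
}
```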

google/resource_container_cluster.go (+86)

```diff
@@ -89,6 +89,33 @@ func clusterSchemaNodeConfig() *schema.Schema {
 	return nodeConfigSch
 }
 
+// Defines default node pool settings for the entire cluster. These settings are
+// overridden if specified on the specific NodePool object.
+func clusterSchemaNodePoolDefaults() *schema.Schema {
+	return &schema.Schema{
+		Type:        schema.TypeList,
+		Optional:    true,
+		Computed:    true,
+		Description: `The default node pool settings for the entire cluster.`,
+		MaxItems:    1,
+		Elem: &schema.Resource{
+			Schema: map[string]*schema.Schema{
+				"node_config_defaults": {
+					Type:        schema.TypeList,
+					Optional:    true,
+					Description: `Subset of NodeConfig message that has defaults.`,
+					MaxItems:    1,
+					Elem: &schema.Resource{
+						Schema: map[string]*schema.Schema{
+							"logging_variant": schemaLoggingVariant(),
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
 func rfc5545RecurrenceDiffSuppress(k, o, n string, d *schema.ResourceData) bool {
 	// This diff gets applied in the cloud console if you specify
 	// "FREQ=DAILY" in your config and add a maintenance exclusion.
@@ -959,6 +986,8 @@ func resourceContainerCluster() *schema.Resource {
 				ConflictsWith: []string{"enable_autopilot"},
 			},
 
+			"node_pool_defaults": clusterSchemaNodePoolDefaults(),
+
 			"node_version": {
 				Type:     schema.TypeString,
 				Optional: true,
@@ -1613,6 +1642,10 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error {
 		cluster.NodeConfig = expandNodeConfig([]interface{}{})
 	}
 
+	if v, ok := d.GetOk("node_pool_defaults"); ok {
+		cluster.NodePoolDefaults = expandNodePoolDefaults(v)
+	}
+
 	if v, ok := d.GetOk("node_config"); ok {
 		cluster.NodeConfig = expandNodeConfig(v)
 	}
@@ -2008,6 +2041,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
 		return err
 	}
 
+	if err := d.Set("node_pool_defaults", flattenNodePoolDefaults(cluster.NodePoolDefaults)); err != nil {
+		return err
+	}
+
 	return nil
 }
 
@@ -2909,6 +2946,29 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error {
 		log.Printf("[INFO] GKE cluster %s resource usage export config has been updated", d.Id())
 	}
 
+	if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.logging_variant") {
+		if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.logging_variant"); ok {
+			loggingVariant := v.(string)
+			req := &container.UpdateClusterRequest{
+				Update: &container.ClusterUpdate{
+					DesiredNodePoolLoggingConfig: &container.NodePoolLoggingConfig{
+						VariantConfig: &container.LoggingVariantConfig{
+							Variant: loggingVariant,
+						},
+					},
+				},
+			}
+
+			updateF := updateFunc(req, "updating GKE cluster desired node pool logging configuration defaults.")
+			// Call update serially.
+			if err := lockedCall(lockKey, updateF); err != nil {
+				return err
+			}
+
+			log.Printf("[INFO] GKE cluster %s node pool logging configuration defaults have been updated", d.Id())
+		}
+	}
+
 	d.Partial(false)
 
 	if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(schema.TimeoutUpdate)); err != nil {
@@ -3749,6 +3809,32 @@ func expandContainerClusterAuthenticatorGroupsConfig(configured interface{}) *container.AuthenticatorGroupsConfig {
 	}
 }
 
+func expandNodePoolDefaults(configured interface{}) *container.NodePoolDefaults {
+	l, ok := configured.([]interface{})
+	if !ok || l == nil || len(l) == 0 || l[0] == nil {
+		return nil
+	}
+	nodePoolDefaults := &container.NodePoolDefaults{}
+	config := l[0].(map[string]interface{})
+	if v, ok := config["node_config_defaults"]; ok && len(v.([]interface{})) > 0 {
+		nodePoolDefaults.NodeConfigDefaults = expandNodeConfigDefaults(v)
+	}
+	return nodePoolDefaults
+}
+
+func flattenNodePoolDefaults(c *container.NodePoolDefaults) []map[string]interface{} {
+	if c == nil {
+		return nil
+	}
+
+	result := make(map[string]interface{})
+	if c.NodeConfigDefaults != nil {
+		result["node_config_defaults"] = flattenNodeConfigDefaults(c.NodeConfigDefaults)
+	}
+
+	return []map[string]interface{}{result}
+}
+
 func flattenNotificationConfig(c *container.NotificationConfig) []map[string]interface{} {
 	if c == nil {
 		return nil
```
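Because the update handler above only sends a `DesiredNodePoolLoggingConfig` in an `UpdateClusterRequest` when `node_pool_defaults.0.node_config_defaults.0.logging_variant` changes, flipping the cluster-wide default is an in-place update rather than a recreate; this is what `TestAccContainerCluster_withLoggingVariantUpdates` below cycles through. A minimal sketch of such a change (the input variable is a hypothetical convenience, not part of this PR):

```terraform
variable "default_logging_variant" {
  type    = string
  default = "DEFAULT" # re-apply with "MAX_THROUGHPUT" to trigger an in-place cluster update
}

resource "google_container_cluster" "with_logging_variant_node_pool_default" {
  name               = "example-cluster"
  location           = "us-central1-f"
  initial_node_count = 1

  node_pool_defaults {
    node_config_defaults {
      logging_variant = var.default_logging_variant
    }
  }
}
```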

google/resource_container_cluster_test.go (+124)

```diff
@@ -923,6 +923,83 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) {
 	})
 }
 
+func TestAccContainerCluster_withLoggingVariantInNodeConfig(t *testing.T) {
+	t.Parallel()
+	clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
+	vcrTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccContainerCluster_withLoggingVariantInNodeConfig(clusterName, "MAX_THROUGHPUT"),
+			},
+			{
+				ResourceName:      "google_container_cluster.with_logging_variant_in_node_config",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
+func TestAccContainerCluster_withLoggingVariantInNodePool(t *testing.T) {
+	t.Parallel()
+	clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
+	nodePoolName := fmt.Sprintf("tf-test-nodepool-%s", randString(t, 10))
+	vcrTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccContainerCluster_withLoggingVariantInNodePool(clusterName, nodePoolName, "MAX_THROUGHPUT"),
+			},
+			{
+				ResourceName:      "google_container_cluster.with_logging_variant_in_node_pool",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
+func TestAccContainerCluster_withLoggingVariantUpdates(t *testing.T) {
+	t.Parallel()
+	clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
+	vcrTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, "DEFAULT"),
+			},
+			{
+				ResourceName:      "google_container_cluster.with_logging_variant_node_pool_default",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+			{
+				Config: testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, "MAX_THROUGHPUT"),
+			},
+			{
+				ResourceName:      "google_container_cluster.with_logging_variant_node_pool_default",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+			{
+				Config: testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, "DEFAULT"),
+			},
+			{
+				ResourceName:      "google_container_cluster.with_logging_variant_node_pool_default",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
 func TestAccContainerCluster_withNodeConfigScopeAlias(t *testing.T) {
 	t.Parallel()
 
@@ -3572,6 +3649,53 @@ resource "google_container_cluster" "with_node_config" {
 `, clusterName)
 }
 
+func testAccContainerCluster_withLoggingVariantInNodeConfig(clusterName, loggingVariant string) string {
+	return fmt.Sprintf(`
+resource "google_container_cluster" "with_logging_variant_in_node_config" {
+  name               = "%s"
+  location           = "us-central1-f"
+  initial_node_count = 1
+
+  node_config {
+    logging_variant = "%s"
+  }
+}
+`, clusterName, loggingVariant)
+}
+
+func testAccContainerCluster_withLoggingVariantInNodePool(clusterName, nodePoolName, loggingVariant string) string {
+	return fmt.Sprintf(`
+resource "google_container_cluster" "with_logging_variant_in_node_pool" {
+  name     = "%s"
+  location = "us-central1-f"
+
+  node_pool {
+    name               = "%s"
+    initial_node_count = 1
+    node_config {
+      logging_variant = "%s"
+    }
+  }
+}
+`, clusterName, nodePoolName, loggingVariant)
+}
+
+func testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, loggingVariant string) string {
+	return fmt.Sprintf(`
+resource "google_container_cluster" "with_logging_variant_node_pool_default" {
+  name               = "%s"
+  location           = "us-central1-f"
+  initial_node_count = 1
+
+  node_pool_defaults {
+    node_config_defaults {
+      logging_variant = "%s"
+    }
+  }
+}
+`, clusterName, loggingVariant)
+}
+
 func testAccContainerCluster_withNodeConfigUpdate(clusterName string) string {
 	return fmt.Sprintf(`
 resource "google_container_cluster" "with_node_config" {
```
