Add instanceFlexibilityPolicy to google_compute_region_instance_group_manager #8581

Merged
.changelog/12163.txt (3 additions, 0 deletions)
@@ -0,0 +1,3 @@
```release-note:enhancement
compute: added new field `instance_flexibility_policy` to resource `google_compute_region_instance_group_manager`
```
resource_compute_region_instance_group_manager.go
@@ -91,6 +91,45 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource {
},
},
},
"instance_flexibility_policy": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Description: `The flexibility policy for this managed instance group. Instance flexibility allows the MIG to create VMs from multiple machine types. The instance flexibility configuration on the MIG overrides the instance template configuration.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"instance_selections": {
Type: schema.TypeSet,
Optional: true,
Description: `Named instance selections configuring properties that the group will use when creating new VMs.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
Description: `Instance selection name.`,
},

"rank": {
Type: schema.TypeInt,
Optional: true,
Description: `Preference of this instance selection. A lower number means a higher preference. The MIG first tries to create a VM from the machine type with the lowest rank and falls back to the next rank based on availability. Machine types and instance selections with the same rank have the same preference.`,
},

"machine_types": {
Type: schema.TypeSet,
Required: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: `Full machine-type names, e.g. "n1-standard-16"`,
},
},
},
},
},
},
},

"name": {
Type: schema.TypeString,
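
For reviewers who want to try the new block: a minimal configuration sketch in the style of the provider's acceptance tests. Everything here (the function name, resource names, machine types) is illustrative and not taken from this PR. With two selections, the group prefers the rank-1 machine types and falls back to rank 2 when capacity runs out.

```go
// Illustrative sketch only — not part of this PR.
func testAccRegionInstanceGroupManager_instanceFlexibilityPolicy() string {
	return `
resource "google_compute_region_instance_group_manager" "flex-mig" {
  name               = "flex-mig"
  base_instance_name = "flex"
  region             = "us-central1"
  target_size        = 2

  version {
    instance_template = google_compute_instance_template.default.self_link
  }

  instance_flexibility_policy {
    # Lower rank = higher preference; the MIG falls back to rank 2
    # when no rank-1 machine type has capacity.
    instance_selections {
      name          = "preferred"
      rank          = 1
      machine_types = ["n2-standard-4", "n2d-standard-4"]
    }
    instance_selections {
      name          = "fallback"
      rank          = 2
      machine_types = ["n1-standard-4"]
    }
  }
}
`
}
```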
@@ -277,7 +316,6 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource {
},
},
},

"standby_policy": {
Type: schema.TypeList,
Computed: true,
@@ -379,7 +417,6 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource {
ValidateFunc: validation.IntBetween(0, 100),
Description: `Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%.`,
},

"min_ready_sec": {
Type: schema.TypeInt,
Optional: true,
@@ -620,6 +657,7 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met
StandbyPolicy: expandStandbyPolicy(d),
TargetSuspendedSize: int64(d.Get("target_suspended_size").(int)),
TargetStoppedSize: int64(d.Get("target_stopped_size").(int)),
InstanceFlexibilityPolicy: expandInstanceFlexibilityPolicy(d),
UpdatePolicy: expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})),
InstanceLifecyclePolicy: expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{})),
AllInstancesConfig: expandAllInstancesConfig(nil, d.Get("all_instances_config").([]interface{})),
@@ -819,6 +857,9 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta
if err := d.Set("target_stopped_size", manager.TargetStoppedSize); err != nil {
return fmt.Errorf("Error setting target_stopped_size: %s", err)
}
if err := d.Set("instance_flexibility_policy", flattenInstanceFlexibilityPolicy(manager.InstanceFlexibilityPolicy)); err != nil {
return fmt.Errorf("Error setting instance_flexibility_policy: %s", err)
}
if err := d.Set("update_policy", flattenRegionUpdatePolicy(manager.UpdatePolicy)); err != nil {
return fmt.Errorf("Error setting update_policy in state: %s", err.Error())
}
@@ -891,12 +932,23 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met
updatedManager.Versions = expandVersions(d.Get("version").([]interface{}))
change = true
}
var targetSizePatchUpdate bool
if d.HasChange("instance_flexibility_policy") {
updatedManager.InstanceFlexibilityPolicy = expandInstanceFlexibilityPolicy(d)
change = true

// when instance_flexibility_policy changes, a simultaneous target size update must go through the same Patch call instead of the separate resize below
if d.HasChange("target_size") {
updatedManager.TargetSize = int64(d.Get("target_size").(int))
updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetSize")
targetSizePatchUpdate = true
}
}

if d.HasChange("distribution_policy_target_shape") {
updatedManager.DistributionPolicy = expandDistributionPolicyForUpdate(d)
change = true
}

if d.HasChange("standby_policy") {
updatedManager.StandbyPolicy = expandStandbyPolicy(d)
change = true
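
On the `targetSizePatchUpdate` flag above: when `instance_flexibility_policy` and `target_size` change together, the size rides along on the same Patch body and the Resize branch below is skipped. The `ForceSendFields` append matters because the generated API structs tag numeric fields with `omitempty`. A minimal sketch of the difference, assuming `google.golang.org/api/compute/v1`:

```go
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// A target size of 0 is Go's zero value, so `omitempty` drops it
	// from the JSON body — a Patch would then leave the size unchanged.
	m := &compute.InstanceGroupManager{TargetSize: 0}
	b, _ := m.MarshalJSON()
	fmt.Println(string(b)) // {}

	// Force-sending the field keeps the explicit 0 in the Patch body.
	m.ForceSendFields = []string{"TargetSize"}
	b, _ = m.MarshalJSON()
	fmt.Println(string(b)) // {"targetSize":0}
}
```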
@@ -979,7 +1031,7 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met
}

// target size should use resize (unless it was already sent via patch above)
if d.HasChange("target_size") {
if d.HasChange("target_size") && !targetSizePatchUpdate {
d.Partial(true)
targetSize := int64(d.Get("target_size").(int))
op, err := config.NewComputeClient(userAgent).RegionInstanceGroupManagers.Resize(
@@ -1124,6 +1176,39 @@ func flattenRegionUpdatePolicy(updatePolicy *compute.InstanceGroupManagerUpdateP
}
return results
}

func expandInstanceFlexibilityPolicy(d *schema.ResourceData) *compute.InstanceGroupManagerInstanceFlexibilityPolicy {
instanceFlexibilityPolicy := &compute.InstanceGroupManagerInstanceFlexibilityPolicy{}
oldFlexibilityPolicy, newFlexibilityPolicy := d.GetChange("instance_flexibility_policy")
for _, flexibilityPolicy := range newFlexibilityPolicy.([]any) {
flexibilityPolicyData := flexibilityPolicy.(map[string]any)
instanceFlexibilityPolicy.InstanceSelections = expandInstanceSelections(flexibilityPolicyData["instance_selections"].(*schema.Set).List())
}
for _, flexibilityPolicy := range oldFlexibilityPolicy.([]any) {
flexibilityPolicyData := flexibilityPolicy.(map[string]any)
for _, instanceSelection := range flexibilityPolicyData["instance_selections"].(*schema.Set).List() {
instanceSelectionData := instanceSelection.(map[string]any)
name := instanceSelectionData["name"].(string)
if _, exist := instanceFlexibilityPolicy.InstanceSelections[name]; !exist {
instanceFlexibilityPolicy.NullFields = append(instanceFlexibilityPolicy.NullFields, "InstanceSelections."+name)
}
}
instanceFlexibilityPolicy.ForceSendFields = append(instanceFlexibilityPolicy.ForceSendFields, "InstanceSelections")
}
return instanceFlexibilityPolicy
}

func expandInstanceSelections(instanceSelections []any) map[string]compute.InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection {
instanceSelectionsMap := make(map[string]compute.InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection)
for _, instanceSelectionRaw := range instanceSelections {
instanceSelectionData := instanceSelectionRaw.(map[string]any)
instanceSelection := compute.InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection{
Rank: int64(instanceSelectionData["rank"].(int)),
MachineTypes: tpgresource.ConvertStringSet(instanceSelectionData["machine_types"].(*schema.Set)),
}
instanceSelectionsMap[instanceSelectionData["name"].(string)] = instanceSelection
}
return instanceSelectionsMap
}
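
A note on the `NullFields` bookkeeping in `expandInstanceFlexibilityPolicy`: the API models selections as a map keyed by name, and deleting an entry requires sending an explicit JSON null under that key, which the generated structs express with the `Parent.key` syntax. A sketch of the resulting wire format (the selection names are made up):

```go
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	policy := &compute.InstanceGroupManagerInstanceFlexibilityPolicy{
		InstanceSelections: map[string]compute.InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection{
			"kept": {Rank: 1, MachineTypes: []string{"n2-standard-4"}},
		},
		// "removed" existed in old state but not in the new config, so the
		// expander nulls it out by name to delete it server-side.
		NullFields: []string{"InstanceSelections.removed"},
	}
	b, _ := policy.MarshalJSON()
	fmt.Println(string(b))
	// {"instanceSelections":{"kept":{"machineTypes":["n2-standard-4"],"rank":1},"removed":null}}
}
```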

func expandDistributionPolicyForUpdate(d *schema.ResourceData) *compute.DistributionPolicy {
dpts := d.Get("distribution_policy_target_shape").(string)
@@ -1159,6 +1244,27 @@ func expandDistributionPolicyForCreate(d *schema.ResourceData) *compute.Distribu
}
return distributionPolicy
}

func flattenInstanceFlexibilityPolicy(instanceFlexibilityPolicy *compute.InstanceGroupManagerInstanceFlexibilityPolicy) []map[string]any {
flattenedInstanceFlexibilityPolicy := []map[string]any{}
if instanceFlexibilityPolicy != nil {
instanceSelectionsMap := map[string]any{}
instanceSelectionsMap["instance_selections"] = flattenInstanceSelections(instanceFlexibilityPolicy.InstanceSelections)
flattenedInstanceFlexibilityPolicy = append(flattenedInstanceFlexibilityPolicy, instanceSelectionsMap)
}
return flattenedInstanceFlexibilityPolicy
}

func flattenInstanceSelections(instanceSelections map[string]compute.InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection) []map[string]any {
instanceSelectionsMap := make([]map[string]any, 0, len(instanceSelections))
for instanceSelectionName, instanceSelection := range instanceSelections {
instanceSelectionData := make(map[string]any)
instanceSelectionData["name"] = instanceSelectionName
instanceSelectionData["rank"] = instanceSelection.Rank
instanceSelectionData["machine_types"] = instanceSelection.MachineTypes
instanceSelectionsMap = append(instanceSelectionsMap, instanceSelectionData)
}
return instanceSelectionsMap
}
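
Because Go map iteration order is randomized, `flattenInstanceSelections` returns the selections in no particular order. That is harmless here: `instance_selections` is a `schema.TypeSet`, so elements are identified by hash rather than position. A round-trip sketch, assuming it sits in the same package as the helpers above:

```go
// Sketch — assumes the same package as the flatten helpers in this diff.
func exampleReadRoundTrip() {
	policy := &compute.InstanceGroupManagerInstanceFlexibilityPolicy{
		InstanceSelections: map[string]compute.InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection{
			"preferred": {Rank: 1, MachineTypes: []string{"n2-standard-4"}},
			"fallback":  {Rank: 2, MachineTypes: []string{"n1-standard-4"}},
		},
	}
	// One policy block holding both selections; the slice order is
	// unspecified, and the set hash makes that irrelevant to state.
	flattened := flattenInstanceFlexibilityPolicy(policy)
	fmt.Printf("%d block(s), %d selection(s)\n",
		len(flattened), len(flattened[0]["instance_selections"].([]map[string]any)))
	// 1 block(s), 2 selection(s)
}
```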

func flattenDistributionPolicy(distributionPolicy *compute.DistributionPolicy) []string {
zones := make([]string, 0)