Skip to content

Commit cc7fe55

Browse files
Avoid recreating instance when cluster is not ready (#9023) (#15961)
Signed-off-by: Modular Magician <[email protected]>
1 parent 8eea792 commit cc7fe55

File tree

4 files changed

+269
-8
lines changed

4 files changed

+269
-8
lines changed

.changelog/9023.txt

+6
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
```release-note:bug
2+
bigtable: avoided re-creation of instance when cluster is still updating and storage type changed
3+
```
4+
```release-note:enhancement
5+
bigtable: added `state` output attribute to `google_bigtable_instance` clusters
6+
```

google/services/bigtable/resource_bigtable_instance.go

+21-6
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,11 @@ func ResourceBigtableInstance() *schema.Resource {
129129
},
130130
},
131131
},
132+
"state": {
133+
Type: schema.TypeString,
134+
Computed: true,
135+
Description: `The state of the cluster`,
136+
},
132137
},
133138
},
134139
},
@@ -420,6 +425,7 @@ func flattenBigtableCluster(c *bigtable.ClusterInfo) map[string]interface{} {
420425
"cluster_id": c.Name,
421426
"storage_type": storageType,
422427
"kms_key_name": c.KMSKeyName,
428+
"state": c.State,
423429
}
424430
if c.AutoscalingConfig != nil {
425431
cluster["autoscaling_config"] = make([]map[string]interface{}, 1)
@@ -565,7 +571,14 @@ func resourceBigtableInstanceUniqueClusterID(_ context.Context, diff *schema.Res
565571
// This doesn't use the standard unordered list utility (https://github.com/GoogleCloudPlatform/magic-modules/blob/main/templates/terraform/unordered_list_customize_diff.erb)
566572
// because some fields can't be modified using the API and we recreate the instance
567573
// when they're changed.
568-
func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error {
574+
func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *schema.ResourceDiff, _ interface{}) error {
575+
// separate func to allow unit testing
576+
return resourceBigtableInstanceClusterReorderTypeListFunc(diff, func(orderedClusters []interface{}) error {
577+
return diff.SetNew("cluster", orderedClusters)
578+
})
579+
580+
}
581+
func resourceBigtableInstanceClusterReorderTypeListFunc(diff tpgresource.TerraformResourceDiff, setNew func([]interface{}) error) error {
569582
oldCount, newCount := diff.GetChange("cluster.#")
570583

571584
// Simulate Required:true, MinItems:1 for "cluster". This doesn't work
@@ -594,7 +607,9 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch
594607
for i := 0; i < newCount.(int); i++ {
595608
_, newId := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i))
596609
_, c := diff.GetChange(fmt.Sprintf("cluster.%d", i))
597-
clusters[newId.(string)] = c
610+
typedCluster := c.(map[string]interface{})
611+
typedCluster["state"] = "READY"
612+
clusters[newId.(string)] = typedCluster
598613
}
599614

600615
// create a list of clusters using the old order when possible to minimise
@@ -630,9 +645,8 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch
630645
}
631646
}
632647

633-
err := diff.SetNew("cluster", orderedClusters)
634-
if err != nil {
635-
return fmt.Errorf("Error setting cluster diff: %s", err)
648+
if err := setNew(orderedClusters); err != nil {
649+
return err
636650
}
637651

638652
// Clusters can't have their zone, storage_type or kms_key_name updated,
@@ -658,8 +672,9 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch
658672
}
659673
}
660674

675+
currentState, _ := diff.GetChange(fmt.Sprintf("cluster.%d.state", i))
661676
oST, nST := diff.GetChange(fmt.Sprintf("cluster.%d.storage_type", i))
662-
if oST != nST {
677+
if oST != nST && currentState.(string) != "CREATING" {
663678
err := diff.ForceNew(fmt.Sprintf("cluster.%d.storage_type", i))
664679
if err != nil {
665680
return fmt.Errorf("Error setting cluster diff: %s", err)

google/services/bigtable/resource_bigtable_instance_internal_test.go

+241-2
Original file line numberDiff line numberDiff line change
@@ -10,9 +10,10 @@ import (
1010

1111
"cloud.google.com/go/bigtable"
1212
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
13+
"github.com/hashicorp/terraform-provider-google/google/tpgresource"
1314
)
1415

15-
func TestGetUnavailableClusterZones(t *testing.T) {
16+
func TestUnitBigtable_getUnavailableClusterZones(t *testing.T) {
1617
cases := map[string]struct {
1718
clusterZones []string
1819
unavailableZones []string
@@ -56,7 +57,7 @@ func TestGetUnavailableClusterZones(t *testing.T) {
5657
}
5758
}
5859

59-
func TestGetInstanceFromResponse(t *testing.T) {
60+
func TestUnitBigtable_getInstanceFromResponse(t *testing.T) {
6061
instanceName := "test-instance"
6162
originalId := "original_value"
6263
cases := map[string]struct {
@@ -132,3 +133,241 @@ func TestGetInstanceFromResponse(t *testing.T) {
132133
}
133134
}
134135
}
136+
137+
func TestUnitBigtable_flattenBigtableCluster(t *testing.T) {
138+
cases := map[string]struct {
139+
clusterInfo *bigtable.ClusterInfo
140+
want map[string]interface{}
141+
}{
142+
"SSD auto scaling": {
143+
clusterInfo: &bigtable.ClusterInfo{
144+
StorageType: bigtable.SSD,
145+
Zone: "zone1",
146+
ServeNodes: 5,
147+
Name: "ssd-cluster",
148+
KMSKeyName: "KMS",
149+
State: "CREATING",
150+
AutoscalingConfig: &bigtable.AutoscalingConfig{
151+
MinNodes: 3,
152+
MaxNodes: 7,
153+
CPUTargetPercent: 50,
154+
StorageUtilizationPerNode: 60,
155+
},
156+
},
157+
want: map[string]interface{}{
158+
"zone": "zone1",
159+
"num_nodes": 5,
160+
"cluster_id": "ssd-cluster",
161+
"storage_type": "SSD",
162+
"kms_key_name": "KMS",
163+
"state": "CREATING",
164+
"autoscaling_config": []map[string]interface{}{
165+
map[string]interface{}{
166+
"min_nodes": 3,
167+
"max_nodes": 7,
168+
"cpu_target": 50,
169+
"storage_target": 60,
170+
},
171+
},
172+
},
173+
},
174+
"HDD manual scaling": {
175+
clusterInfo: &bigtable.ClusterInfo{
176+
StorageType: bigtable.HDD,
177+
Zone: "zone2",
178+
ServeNodes: 7,
179+
Name: "hdd-cluster",
180+
KMSKeyName: "KMS",
181+
State: "READY",
182+
},
183+
want: map[string]interface{}{
184+
"zone": "zone2",
185+
"num_nodes": 7,
186+
"cluster_id": "hdd-cluster",
187+
"storage_type": "HDD",
188+
"kms_key_name": "KMS",
189+
"state": "READY",
190+
},
191+
},
192+
}
193+
194+
for tn, tc := range cases {
195+
if got := flattenBigtableCluster(tc.clusterInfo); !reflect.DeepEqual(got, tc.want) {
196+
t.Errorf("bad: %s, got %q, want %q", tn, got, tc.want)
197+
}
198+
}
199+
}
200+
201+
func TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc_error(t *testing.T) {
202+
d := &tpgresource.ResourceDiffMock{
203+
After: map[string]interface{}{
204+
"cluster.#": 0,
205+
},
206+
}
207+
if err := resourceBigtableInstanceClusterReorderTypeListFunc(d, nil); err == nil {
208+
t.Errorf("expected error, got success")
209+
}
210+
}
211+
212+
func TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc(t *testing.T) {
213+
cases := map[string]struct {
214+
before map[string]interface{}
215+
after map[string]interface{}
216+
wantClusterOrder []string
217+
wantForceNew bool
218+
}{
219+
"create": {
220+
before: map[string]interface{}{
221+
"cluster.#": 1,
222+
"cluster.0.cluster_id": "some-id-a",
223+
},
224+
after: map[string]interface{}{
225+
"name": "some-name",
226+
"cluster.#": 1,
227+
"cluster.0.cluster_id": "some-id-a",
228+
"cluster.0": map[string]interface{}{
229+
"cluster_id": "some-id-a",
230+
},
231+
},
232+
wantClusterOrder: []string{},
233+
wantForceNew: false,
234+
},
235+
"no force new change": {
236+
before: map[string]interface{}{
237+
"name": "some-name",
238+
"cluster.#": 4,
239+
"cluster.0.cluster_id": "some-id-a",
240+
"cluster.1.cluster_id": "some-id-b",
241+
"cluster.2.cluster_id": "some-id-c",
242+
"cluster.3.cluster_id": "some-id-e",
243+
},
244+
after: map[string]interface{}{
245+
"name": "some-name",
246+
"cluster.#": 3,
247+
"cluster.0.cluster_id": "some-id-c",
248+
"cluster.1.cluster_id": "some-id-a",
249+
"cluster.2.cluster_id": "some-id-d",
250+
"cluster.0": map[string]interface{}{
251+
"cluster_id": "some-id-c",
252+
},
253+
"cluster.1": map[string]interface{}{
254+
"cluster_id": "some-id-a",
255+
},
256+
"cluster.2": map[string]interface{}{
257+
"cluster_id": "some-id-d",
258+
},
259+
},
260+
wantClusterOrder: []string{"some-id-a", "some-id-d", "some-id-c"},
261+
wantForceNew: false,
262+
},
263+
"force new - zone change": {
264+
before: map[string]interface{}{
265+
"name": "some-name",
266+
"cluster.#": 1,
267+
"cluster.0.cluster_id": "some-id-a",
268+
"cluster.0.zone": "zone-a",
269+
},
270+
after: map[string]interface{}{
271+
"name": "some-name",
272+
"cluster.#": 1,
273+
"cluster.0.cluster_id": "some-id-a",
274+
"cluster.0.zone": "zone-b",
275+
"cluster.0": map[string]interface{}{
276+
"cluster_id": "some-id-a",
277+
"zone": "zone-b",
278+
},
279+
},
280+
wantClusterOrder: []string{"some-id-a"},
281+
wantForceNew: true,
282+
},
283+
"force new - kms_key_name change": {
284+
before: map[string]interface{}{
285+
"name": "some-name",
286+
"cluster.#": 1,
287+
"cluster.0.cluster_id": "some-id-a",
288+
"cluster.0.kms_key_name": "key-a",
289+
},
290+
after: map[string]interface{}{
291+
"name": "some-name",
292+
"cluster.#": 1,
293+
"cluster.0.cluster_id": "some-id-a",
294+
"cluster.0.kms_key_name": "key-b",
295+
"cluster.0": map[string]interface{}{
296+
"cluster_id": "some-id-a",
297+
"kms_key_name": "key-b",
298+
},
299+
},
300+
wantClusterOrder: []string{"some-id-a"},
301+
wantForceNew: true,
302+
},
303+
"force new - storage_type change": {
304+
before: map[string]interface{}{
305+
"name": "some-name",
306+
"cluster.#": 1,
307+
"cluster.0.cluster_id": "some-id-a",
308+
"cluster.0.storage_type": "HDD",
309+
"cluster.0.state": "READY",
310+
},
311+
after: map[string]interface{}{
312+
"name": "some-name",
313+
"cluster.#": 1,
314+
"cluster.0.cluster_id": "some-id-a",
315+
"cluster.0.storage_type": "SSD",
316+
"cluster.0": map[string]interface{}{
317+
"cluster_id": "some-id-a",
318+
"storage_type": "SSD",
319+
},
320+
},
321+
wantClusterOrder: []string{"some-id-a"},
322+
wantForceNew: true,
323+
},
324+
"skip force new - storage_type change for CREATING cluster": {
325+
before: map[string]interface{}{
326+
"name": "some-name",
327+
"cluster.#": 1,
328+
"cluster.0.cluster_id": "some-id-a",
329+
"cluster.0.storage_type": "SSD",
330+
"cluster.0.state": "CREATING",
331+
},
332+
after: map[string]interface{}{
333+
"name": "some-name",
334+
"cluster.#": 1,
335+
"cluster.0.cluster_id": "some-id-a",
336+
"cluster.0.storage_type": "HDD",
337+
"cluster.0": map[string]interface{}{
338+
"cluster_id": "some-id-a",
339+
"storage_type": "HDD",
340+
},
341+
},
342+
wantClusterOrder: []string{"some-id-a"},
343+
wantForceNew: false,
344+
},
345+
}
346+
for tn, tc := range cases {
347+
t.Run(tn, func(t *testing.T) {
348+
d := &tpgresource.ResourceDiffMock{
349+
Before: tc.before,
350+
After: tc.after,
351+
}
352+
var clusters []interface{}
353+
err := resourceBigtableInstanceClusterReorderTypeListFunc(d, func(gotClusters []interface{}) error {
354+
clusters = gotClusters
355+
return nil
356+
})
357+
if err != nil {
358+
t.Fatalf("bad: %s, error: %v", tn, err)
359+
}
360+
if d.IsForceNew != tc.wantForceNew {
361+
t.Errorf("bad: %s, got %v, want %v", tn, d.IsForceNew, tc.wantForceNew)
362+
}
363+
gotClusterOrder := []string{}
364+
for _, cluster := range clusters {
365+
clusterResource := cluster.(map[string]interface{})
366+
gotClusterOrder = append(gotClusterOrder, clusterResource["cluster_id"].(string))
367+
}
368+
if !reflect.DeepEqual(gotClusterOrder, tc.wantClusterOrder) {
369+
t.Errorf("bad: %s, got %q, want %q", tn, gotClusterOrder, tc.wantClusterOrder)
370+
}
371+
})
372+
}
373+
}

website/docs/r/bigtable_instance.html.markdown

+1
Original file line numberDiff line numberDiff line change
@@ -142,6 +142,7 @@ If no value is set, Cloud Bigtable automatically allocates nodes based on your d
142142
In addition to the arguments listed above, the following computed attributes are exported:
143143

144144
* `id` - an identifier for the resource with format `projects/{{project}}/instances/{{name}}`
145+
* `cluster.0.state` - describes the current state of the cluster.
145146

146147
## Timeouts
147148

0 commit comments

Comments
 (0)