@@ -10,9 +10,10 @@ import (
10
10
11
11
"cloud.google.com/go/bigtable"
12
12
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
13
+ "github.com/hashicorp/terraform-provider-google/google/tpgresource"
13
14
)
14
15
15
- func TestGetUnavailableClusterZones (t * testing.T ) {
16
+ func TestUnitBigtable_getUnavailableClusterZones (t * testing.T ) {
16
17
cases := map [string ]struct {
17
18
clusterZones []string
18
19
unavailableZones []string
@@ -56,7 +57,7 @@ func TestGetUnavailableClusterZones(t *testing.T) {
56
57
}
57
58
}
58
59
59
- func TestGetInstanceFromResponse (t * testing.T ) {
60
+ func TestUnitBigtable_getInstanceFromResponse (t * testing.T ) {
60
61
instanceName := "test-instance"
61
62
originalId := "original_value"
62
63
cases := map [string ]struct {
@@ -132,3 +133,241 @@ func TestGetInstanceFromResponse(t *testing.T) {
132
133
}
133
134
}
134
135
}
136
+
137
+ func TestUnitBigtable_flattenBigtableCluster (t * testing.T ) {
138
+ cases := map [string ]struct {
139
+ clusterInfo * bigtable.ClusterInfo
140
+ want map [string ]interface {}
141
+ }{
142
+ "SSD auto scaling" : {
143
+ clusterInfo : & bigtable.ClusterInfo {
144
+ StorageType : bigtable .SSD ,
145
+ Zone : "zone1" ,
146
+ ServeNodes : 5 ,
147
+ Name : "ssd-cluster" ,
148
+ KMSKeyName : "KMS" ,
149
+ State : "CREATING" ,
150
+ AutoscalingConfig : & bigtable.AutoscalingConfig {
151
+ MinNodes : 3 ,
152
+ MaxNodes : 7 ,
153
+ CPUTargetPercent : 50 ,
154
+ StorageUtilizationPerNode : 60 ,
155
+ },
156
+ },
157
+ want : map [string ]interface {}{
158
+ "zone" : "zone1" ,
159
+ "num_nodes" : 5 ,
160
+ "cluster_id" : "ssd-cluster" ,
161
+ "storage_type" : "SSD" ,
162
+ "kms_key_name" : "KMS" ,
163
+ "state" : "CREATING" ,
164
+ "autoscaling_config" : []map [string ]interface {}{
165
+ map [string ]interface {}{
166
+ "min_nodes" : 3 ,
167
+ "max_nodes" : 7 ,
168
+ "cpu_target" : 50 ,
169
+ "storage_target" : 60 ,
170
+ },
171
+ },
172
+ },
173
+ },
174
+ "HDD manual scaling" : {
175
+ clusterInfo : & bigtable.ClusterInfo {
176
+ StorageType : bigtable .HDD ,
177
+ Zone : "zone2" ,
178
+ ServeNodes : 7 ,
179
+ Name : "hdd-cluster" ,
180
+ KMSKeyName : "KMS" ,
181
+ State : "READY" ,
182
+ },
183
+ want : map [string ]interface {}{
184
+ "zone" : "zone2" ,
185
+ "num_nodes" : 7 ,
186
+ "cluster_id" : "hdd-cluster" ,
187
+ "storage_type" : "HDD" ,
188
+ "kms_key_name" : "KMS" ,
189
+ "state" : "READY" ,
190
+ },
191
+ },
192
+ }
193
+
194
+ for tn , tc := range cases {
195
+ if got := flattenBigtableCluster (tc .clusterInfo ); ! reflect .DeepEqual (got , tc .want ) {
196
+ t .Errorf ("bad: %s, got %q, want %q" , tn , got , tc .want )
197
+ }
198
+ }
199
+ }
200
+
201
+ func TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc_error (t * testing.T ) {
202
+ d := & tpgresource.ResourceDiffMock {
203
+ After : map [string ]interface {}{
204
+ "cluster.#" : 0 ,
205
+ },
206
+ }
207
+ if err := resourceBigtableInstanceClusterReorderTypeListFunc (d , nil ); err == nil {
208
+ t .Errorf ("expected error, got success" )
209
+ }
210
+ }
211
+
212
// TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc exercises
// the success paths of resourceBigtableInstanceClusterReorderTypeListFunc over
// mocked Before/After resource diffs. For each case it checks two outputs:
// the cluster order handed to the setter callback, and whether the diff was
// marked ForceNew. The before/after maps mimic Terraform's flattened diff
// representation ("cluster.#" count, "cluster.N.<attr>" leaves, plus a
// "cluster.N" map entry for each cluster present in the new state).
func TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc(t *testing.T) {
	cases := map[string]struct {
		before           map[string]interface{} // prior state, flattened diff form
		after            map[string]interface{} // planned state, flattened diff form
		wantClusterOrder []string               // cluster_ids expected in the setter callback, in order
		wantForceNew     bool                   // whether the diff should be flagged ForceNew
	}{
		// Resource creation: no "name" in before, so no reordering happens
		// and the setter receives an empty cluster list.
		"create": {
			before: map[string]interface{}{
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
			},
			after: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.0": map[string]interface{}{
					"cluster_id": "some-id-a",
				},
			},
			wantClusterOrder: []string{},
			wantForceNew:     false,
		},
		// Clusters added, removed and shuffled between before and after:
		// the surviving clusters are reordered, but nothing forces a new
		// resource.
		"no force new change": {
			before: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            4,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.1.cluster_id": "some-id-b",
				"cluster.2.cluster_id": "some-id-c",
				"cluster.3.cluster_id": "some-id-e",
			},
			after: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            3,
				"cluster.0.cluster_id": "some-id-c",
				"cluster.1.cluster_id": "some-id-a",
				"cluster.2.cluster_id": "some-id-d",
				"cluster.0": map[string]interface{}{
					"cluster_id": "some-id-c",
				},
				"cluster.1": map[string]interface{}{
					"cluster_id": "some-id-a",
				},
				"cluster.2": map[string]interface{}{
					"cluster_id": "some-id-d",
				},
			},
			wantClusterOrder: []string{"some-id-a", "some-id-d", "some-id-c"},
			wantForceNew:     false,
		},
		// Changing a cluster's zone cannot be done in place.
		"force new - zone change": {
			before: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.0.zone":       "zone-a",
			},
			after: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.0.zone":       "zone-b",
				"cluster.0": map[string]interface{}{
					"cluster_id": "some-id-a",
					"zone":       "zone-b",
				},
			},
			wantClusterOrder: []string{"some-id-a"},
			wantForceNew:     true,
		},
		// Changing a cluster's KMS key cannot be done in place.
		"force new - kms_key_name change": {
			before: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.0.kms_key_name": "key-a",
			},
			after: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.0.kms_key_name": "key-b",
				"cluster.0": map[string]interface{}{
					"cluster_id":   "some-id-a",
					"kms_key_name": "key-b",
				},
			},
			wantClusterOrder: []string{"some-id-a"},
			wantForceNew:     true,
		},
		// Changing storage type on a READY cluster forces recreation.
		"force new - storage_type change": {
			before: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.0.storage_type": "HDD",
				"cluster.0.state":      "READY",
			},
			after: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.0.storage_type": "SSD",
				"cluster.0": map[string]interface{}{
					"cluster_id":   "some-id-a",
					"storage_type": "SSD",
				},
			},
			wantClusterOrder: []string{"some-id-a"},
			wantForceNew:     true,
		},
		// A cluster still in state CREATING is exempt from the
		// storage_type ForceNew rule.
		"skip force new - storage_type change for CREATING cluster": {
			before: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.0.storage_type": "SSD",
				"cluster.0.state":      "CREATING",
			},
			after: map[string]interface{}{
				"name":                 "some-name",
				"cluster.#":            1,
				"cluster.0.cluster_id": "some-id-a",
				"cluster.0.storage_type": "HDD",
				"cluster.0": map[string]interface{}{
					"cluster_id":   "some-id-a",
					"storage_type": "HDD",
				},
			},
			wantClusterOrder: []string{"some-id-a"},
			wantForceNew:     false,
		},
	}
	for tn, tc := range cases {
		t.Run(tn, func(t *testing.T) {
			d := &tpgresource.ResourceDiffMock{
				Before: tc.before,
				After:  tc.after,
			}
			// Capture the cluster list the function hands to its setter
			// callback instead of writing it back into the diff.
			var clusters []interface{}
			err := resourceBigtableInstanceClusterReorderTypeListFunc(d, func(gotClusters []interface{}) error {
				clusters = gotClusters
				return nil
			})
			if err != nil {
				t.Fatalf("bad: %s, error: %v", tn, err)
			}
			if d.IsForceNew != tc.wantForceNew {
				t.Errorf("bad: %s, got %v, want %v", tn, d.IsForceNew, tc.wantForceNew)
			}
			// Extract just the cluster_ids, preserving order, for comparison.
			gotClusterOrder := []string{}
			for _, cluster := range clusters {
				clusterResource := cluster.(map[string]interface{})
				gotClusterOrder = append(gotClusterOrder, clusterResource["cluster_id"].(string))
			}
			if !reflect.DeepEqual(gotClusterOrder, tc.wantClusterOrder) {
				t.Errorf("bad: %s, got %q, want %q", tn, gotClusterOrder, tc.wantClusterOrder)
			}
		})
	}
}
0 commit comments