 	"fmt"
 	"log"
 
+	"github.com/hashicorp/terraform/helper/customdiff"
 	"github.com/hashicorp/terraform/helper/schema"
 	"github.com/hashicorp/terraform/helper/validation"
 
@@ -15,7 +16,13 @@ func resourceBigtableInstance() *schema.Resource {
 	return &schema.Resource{
 		Create: resourceBigtableInstanceCreate,
 		Read:   resourceBigtableInstanceRead,
+		// TODO: Update is only needed because we're doing ForceNew in CustomizeDiff;
+		// when we're done with the deprecation, we can drop CustomizeDiff and make cluster ForceNew.
+		Update: schema.Noop,
 		Delete: resourceBigtableInstanceDestroy,
+		CustomizeDiff: customdiff.All(
+			resourceBigTableInstanceClusterCustomizeDiff,
+		),
 
 		Schema: map[string]*schema.Schema{
 			"name": {
@@ -25,16 +32,49 @@ func resourceBigtableInstance() *schema.Resource {
 			},
 
 			"cluster_id": {
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: true,
+				Type:          schema.TypeString,
+				Optional:      true,
+				Deprecated:    "Use cluster instead.",
+				ConflictsWith: []string{"cluster"},
+			},
+
+			"cluster": {
+				Type:          schema.TypeSet,
+				Optional:      true,
+				MaxItems:      1,
+				ConflictsWith: []string{"cluster_id", "zone", "num_nodes", "storage_type"},
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"cluster_id": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+						"zone": {
+							Type:     schema.TypeString,
+							Optional: true,
+							Computed: true,
+						},
+						"num_nodes": {
+							Type:     schema.TypeInt,
+							Optional: true,
+						},
+						"storage_type": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							Default:      "SSD",
+							ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false),
+						},
+					},
+				},
 			},
 
 			"zone": {
-				Type:     schema.TypeString,
-				Optional: true,
-				Computed: true,
-				ForceNew: true,
+				Type:          schema.TypeString,
+				Optional:      true,
+				Computed:      true,
+				ForceNew:      true,
+				Deprecated:    "Use cluster instead.",
+				ConflictsWith: []string{"cluster"},
 			},
 
 			"display_name": {
@@ -45,9 +85,10 @@ func resourceBigtableInstance() *schema.Resource {
 			},
 
 			"num_nodes": {
-				Type:     schema.TypeInt,
-				Optional: true,
-				ForceNew: true,
+				Type:          schema.TypeInt,
+				Optional:      true,
+				Deprecated:    "Use cluster instead.",
+				ConflictsWith: []string{"cluster"},
 			},
 
 			"instance_type": {
@@ -59,11 +100,12 @@ func resourceBigtableInstance() *schema.Resource {
 			},
 
 			"storage_type": {
-				Type:         schema.TypeString,
-				Optional:     true,
-				ForceNew:     true,
-				Default:      "SSD",
-				ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false),
+				Type:          schema.TypeString,
+				Optional:      true,
+				Default:       "SSD",
+				ValidateFunc:  validation.StringInSlice([]string{"SSD", "HDD"}, false),
+				Deprecated:    "Use cluster instead.",
+				ConflictsWith: []string{"cluster"},
 			},
 
 			"project": {
@@ -76,6 +118,50 @@ func resourceBigtableInstance() *schema.Resource {
 	}
 }
 
+func resourceBigTableInstanceClusterCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error {
+	if d.Get("cluster_id").(string) == "" && d.Get("cluster.#").(int) == 0 {
+		return fmt.Errorf("At least one cluster must be set.")
+	}
+	if !d.HasChange("cluster_id") && !d.HasChange("zone") && !d.HasChange("num_nodes") &&
+		!d.HasChange("storage_type") && !d.HasChange("cluster") {
+		return nil
+	}
+	if d.Get("cluster.#").(int) == 1 {
+		// If we have exactly one cluster, and it has the same values as the old top-level
+		// values, we can assume the user is trying to go from the deprecated values to the
+		// new values, and we shouldn't ForceNew. We know that the top-level values aren't
+		// set, because they ConflictWith cluster.
+		oldID, _ := d.GetChange("cluster_id")
+		oldNodes, _ := d.GetChange("num_nodes")
+		oldZone, _ := d.GetChange("zone")
+		oldStorageType, _ := d.GetChange("storage_type")
+		new := d.Get("cluster").(*schema.Set).List()[0].(map[string]interface{})
+
+		if oldID.(string) == new["cluster_id"].(string) &&
+			oldNodes.(int) == new["num_nodes"].(int) &&
+			oldZone.(string) == new["zone"].(string) &&
+			oldStorageType.(string) == new["storage_type"].(string) {
+			return nil
+		}
+	}
+	if d.HasChange("cluster_id") {
+		d.ForceNew("cluster_id")
+	}
+	if d.HasChange("cluster") {
+		d.ForceNew("cluster")
+	}
+	if d.HasChange("zone") {
+		d.ForceNew("zone")
+	}
+	if d.HasChange("num_nodes") {
+		d.ForceNew("num_nodes")
+	}
+	if d.HasChange("storage_type") {
+		d.ForceNew("storage_type")
+	}
+	return nil
+}
+
 func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) error {
 	config := meta.(*Config)
 	ctx := context.Background()
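To make the intent of the CustomizeDiff above concrete, here is a hedged sketch (not part of this change) of the migration it is meant to allow. The two hypothetical configs below, written as the Go string constants typical of acceptance tests (the constant names and values are invented), describe the same instance: the first uses the deprecated top-level fields, the second the new cluster block. Because every value in the cluster block matches the prior state, the diff function returns before reaching its ForceNew calls, so switching from the first form to the second should plan as an in-place change rather than a recreate.

// Hypothetical acceptance-test style configs for the migration path; assumes
// the provider's google package. Names and values are illustrative only.
package google

const testAccBigtableInstanceDeprecatedFields = `
resource "google_bigtable_instance" "example" {
  name         = "tf-instance"
  cluster_id   = "tf-instance-cluster"
  zone         = "us-central1-b"
  num_nodes    = 3
  storage_type = "HDD"
}
`

const testAccBigtableInstanceClusterBlock = `
resource "google_bigtable_instance" "example" {
  name = "tf-instance"

  cluster {
    cluster_id   = "tf-instance-cluster"
    zone         = "us-central1-b"
    num_nodes    = 3
    storage_type = "HDD"
  }
}
`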
@@ -85,46 +171,48 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) error {
 		return err
 	}
 
-	name := d.Get("name").(string)
-	displayName, ok := d.GetOk("display_name")
-	if !ok {
-		displayName = name
+	conf := &bigtable.InstanceWithClustersConfig{
+		InstanceID: d.Get("name").(string),
 	}
 
-	var storageType bigtable.StorageType
-	switch value := d.Get("storage_type"); value {
-	case "HDD":
-		storageType = bigtable.HDD
-	case "SSD":
-		storageType = bigtable.SSD
+	displayName, ok := d.GetOk("display_name")
+	if !ok {
+		displayName = conf.InstanceID
 	}
+	conf.DisplayName = displayName.(string)
 
-	numNodes := int32(d.Get("num_nodes").(int))
-	var instanceType bigtable.InstanceType
-	switch value := d.Get("instance_type"); value {
+	switch d.Get("instance_type").(string) {
 	case "DEVELOPMENT":
-		instanceType = bigtable.DEVELOPMENT
-
-		if numNodes > 0 {
-			return fmt.Errorf("Can't specify a non-zero number of nodes: %d for DEVELOPMENT Bigtable instance: %s", numNodes, name)
-		}
+		conf.InstanceType = bigtable.DEVELOPMENT
 	case "PRODUCTION":
-		instanceType = bigtable.PRODUCTION
-	}
-
-	zone, err := getZone(d, config)
-	if err != nil {
-		return err
+		conf.InstanceType = bigtable.PRODUCTION
 	}
 
-	instanceConf := &bigtable.InstanceConf{
-		InstanceId:   name,
-		DisplayName:  displayName.(string),
-		ClusterId:    d.Get("cluster_id").(string),
-		NumNodes:     numNodes,
-		InstanceType: instanceType,
-		StorageType:  storageType,
-		Zone:         zone,
+	if d.Get("cluster.#").(int) > 0 {
+		// expand cluster
+		conf.Clusters = expandBigtableClusters(d.Get("cluster").(*schema.Set).List(), conf.InstanceID, config.Zone)
+		if err != nil {
+			return fmt.Errorf("error expanding clusters: %s", err.Error())
+		}
+	} else {
+		// TODO: remove this when we're done with the deprecation period
+		zone, err := getZone(d, config)
+		if err != nil {
+			return err
+		}
+		cluster := bigtable.ClusterConfig{
+			InstanceID: conf.InstanceID,
+			NumNodes:   int32(d.Get("num_nodes").(int)),
+			Zone:       zone,
+			ClusterID:  d.Get("cluster_id").(string),
+		}
+		switch d.Get("storage_type").(string) {
+		case "HDD":
+			cluster.StorageType = bigtable.HDD
+		case "SSD":
+			cluster.StorageType = bigtable.SSD
+		}
+		conf.Clusters = append(conf.Clusters, cluster)
 	}
 
 	c, err := config.bigtableClientFactory.NewInstanceAdminClient(project)
@@ -134,12 +222,12 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) error {
 
 	defer c.Close()
 
-	err = c.CreateInstance(ctx, instanceConf)
+	err = c.CreateInstanceWithClusters(ctx, conf)
 	if err != nil {
 		return fmt.Errorf("Error creating instance. %s", err)
 	}
 
-	d.SetId(name)
+	d.SetId(conf.InstanceID)
 
 	return resourceBigtableInstanceRead(d, meta)
 }
@@ -153,11 +241,6 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) error {
 		return err
 	}
 
-	zone, err := getZone(d, config)
-	if err != nil {
-		return err
-	}
-
 	c, err := config.bigtableClientFactory.NewInstanceAdminClient(project)
 	if err != nil {
 		return fmt.Errorf("Error starting instance admin client. %s", err)
@@ -173,7 +256,37 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) error {
 	}
 
 	d.Set("project", project)
-	d.Set("zone", zone)
+	if d.Get("cluster.#").(int) > 0 {
+		clusters := d.Get("cluster").(*schema.Set).List()
+		clusterState := []map[string]interface{}{}
+		for _, cl := range clusters {
+			cluster := cl.(map[string]interface{})
+			clus, err := c.GetCluster(ctx, instance.Name, cluster["cluster_id"].(string))
+			if err != nil {
+				if isGoogleApiErrorWithCode(err, 404) {
+					log.Printf("[WARN] Cluster %q not found, not setting it in state", cluster["cluster_id"].(string))
+					continue
+				}
+				return fmt.Errorf("Error retrieving cluster %q: %s", cluster["cluster_id"].(string), err.Error())
+			}
+			clusterState = append(clusterState, flattenBigtableCluster(clus, cluster["storage_type"].(string)))
+		}
+		err = d.Set("cluster", clusterState)
+		if err != nil {
+			return fmt.Errorf("Error setting clusters in state: %s", err.Error())
+		}
+		d.Set("cluster_id", "")
+		d.Set("zone", "")
+		d.Set("num_nodes", 0)
+		d.Set("storage_type", "SSD")
+	} else {
+		// TODO: remove this when we're done with our deprecation period
+		zone, err := getZone(d, config)
+		if err != nil {
+			return err
+		}
+		d.Set("zone", zone)
+	}
 	d.Set("name", instance.Name)
 	d.Set("display_name", instance.DisplayName)
 
@@ -206,3 +319,38 @@ func resourceBigtableInstanceDestroy(d *schema.ResourceData, meta interface{}) error {
 
 	return nil
 }
+
+func flattenBigtableCluster(c *bigtable.ClusterInfo, storageType string) map[string]interface{} {
+	return map[string]interface{}{
+		"zone":         c.Zone,
+		"num_nodes":    c.ServeNodes,
+		"cluster_id":   c.Name,
+		"storage_type": storageType,
+	}
+}
+
+func expandBigtableClusters(clusters []interface{}, instanceID string, defaultZone string) []bigtable.ClusterConfig {
+	results := make([]bigtable.ClusterConfig, 0, len(clusters))
+	for _, c := range clusters {
+		cluster := c.(map[string]interface{})
+		zone := defaultZone
+		if confZone, ok := cluster["zone"]; ok {
+			zone = confZone.(string)
+		}
+		var storageType bigtable.StorageType
+		switch cluster["storage_type"].(string) {
+		case "SSD":
+			storageType = bigtable.SSD
+		case "HDD":
+			storageType = bigtable.HDD
+		}
+		results = append(results, bigtable.ClusterConfig{
+			InstanceID:  instanceID,
+			Zone:        zone,
+			ClusterID:   cluster["cluster_id"].(string),
+			NumNodes:    int32(cluster["num_nodes"].(int)),
+			StorageType: storageType,
+		})
+	}
+	return results
+}
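As a closing illustration, here is a minimal in-package unit-test sketch of the two helpers added above. The test name and sample values are hypothetical, and it assumes it sits next to resource_bigtable_instance.go in the provider's google package so the unexported functions are visible: expandBigtableClusters should turn one cluster block into a bigtable.ClusterConfig with the instance ID and storage type filled in, and flattenBigtableCluster should map the API's ClusterInfo back onto the schema's keys.

// Hypothetical unit-test sketch; not part of this change.
package google

import (
	"testing"

	"cloud.google.com/go/bigtable"
)

func TestUnitBigtable_expandAndFlattenClusters(t *testing.T) {
	// One cluster block as it would arrive from the schema.Set, with an
	// explicit zone and HDD storage.
	raw := []interface{}{
		map[string]interface{}{
			"cluster_id":   "tf-instance-cluster",
			"zone":         "us-central1-b",
			"num_nodes":    3,
			"storage_type": "HDD",
		},
	}

	clusters := expandBigtableClusters(raw, "tf-instance", "us-central1-c")
	if len(clusters) != 1 {
		t.Fatalf("expected 1 cluster, got %d", len(clusters))
	}
	c := clusters[0]
	if c.InstanceID != "tf-instance" || c.ClusterID != "tf-instance-cluster" {
		t.Errorf("unexpected IDs: %+v", c)
	}
	if c.Zone != "us-central1-b" || c.NumNodes != 3 || c.StorageType != bigtable.HDD {
		t.Errorf("unexpected cluster config: %+v", c)
	}

	// flattenBigtableCluster works from the API's ClusterInfo, which reports
	// the cluster name, zone, and serve-node count.
	info := &bigtable.ClusterInfo{
		Name:       "tf-instance-cluster",
		Zone:       "us-central1-b",
		ServeNodes: 3,
	}
	flat := flattenBigtableCluster(info, "HDD")
	if flat["cluster_id"] != "tf-instance-cluster" || flat["zone"] != "us-central1-b" {
		t.Errorf("unexpected flattened cluster: %+v", flat)
	}
	if flat["num_nodes"] != 3 || flat["storage_type"] != "HDD" {
		t.Errorf("unexpected flattened values: %+v", flat)
	}
}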