
Commit d6cf446

Merge pull request #2161 from terraform-providers/paddy_bigtable_clusters
Move Bigtable config to cluster block.
2 parents ba088fa + f743cc0 · commit d6cf446

File tree

3 files changed: +243 −75

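For configuration authors, the practical effect of this commit is that the top-level cluster_id, zone, num_nodes, and storage_type fields of google_bigtable_instance are deprecated in favor of a nested cluster block. A minimal before/after sketch, using hypothetical instance and cluster names (field names come from the schema in the diff below):

    # Deprecated top-level form (still accepted during the deprecation period):
    resource "google_bigtable_instance" "example" {
      name         = "example-instance"
      cluster_id   = "example-cluster"
      zone         = "us-central1-b"
      num_nodes    = 3
      storage_type = "HDD"
    }

    # New form introduced by this commit:
    resource "google_bigtable_instance" "example" {
      name = "example-instance"
      cluster {
        cluster_id   = "example-cluster"
        zone         = "us-central1-b"
        num_nodes    = 3
        storage_type = "HDD"
      }
    }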

google/resource_bigtable_instance.go

+203 −55

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"log"
 
+	"github.com/hashicorp/terraform/helper/customdiff"
 	"github.com/hashicorp/terraform/helper/schema"
 	"github.com/hashicorp/terraform/helper/validation"
 
@@ -15,7 +16,13 @@ func resourceBigtableInstance() *schema.Resource {
 	return &schema.Resource{
 		Create: resourceBigtableInstanceCreate,
 		Read:   resourceBigtableInstanceRead,
+		// TODO: Update is only needed because we're doing forcenew in customizediff
+		// when we're done with the deprecation, we can drop customizediff and make cluster forcenew
+		Update: schema.Noop,
 		Delete: resourceBigtableInstanceDestroy,
+		CustomizeDiff: customdiff.All(
+			resourceBigTableInstanceClusterCustomizeDiff,
+		),
 
 		Schema: map[string]*schema.Schema{
 			"name": {
@@ -25,16 +32,49 @@ func resourceBigtableInstance() *schema.Resource {
 			},
 
 			"cluster_id": {
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: true,
+				Type:          schema.TypeString,
+				Optional:      true,
+				Deprecated:    "Use cluster instead.",
+				ConflictsWith: []string{"cluster"},
+			},
+
+			"cluster": {
+				Type:          schema.TypeSet,
+				Optional:      true,
+				MaxItems:      1,
+				ConflictsWith: []string{"cluster_id", "zone", "num_nodes", "storage_type"},
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"cluster_id": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+						"zone": {
+							Type:     schema.TypeString,
+							Optional: true,
+							Computed: true,
+						},
+						"num_nodes": {
+							Type:     schema.TypeInt,
+							Optional: true,
+						},
+						"storage_type": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							Default:      "SSD",
+							ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false),
+						},
+					},
+				},
 			},
 
 			"zone": {
-				Type:     schema.TypeString,
-				Optional: true,
-				Computed: true,
-				ForceNew: true,
+				Type:          schema.TypeString,
+				Optional:      true,
+				Computed:      true,
+				ForceNew:      true,
+				Deprecated:    "Use cluster instead.",
+				ConflictsWith: []string{"cluster"},
 			},
 
 			"display_name": {
@@ -45,9 +85,10 @@ func resourceBigtableInstance() *schema.Resource {
 			},
 
 			"num_nodes": {
-				Type:     schema.TypeInt,
-				Optional: true,
-				ForceNew: true,
+				Type:          schema.TypeInt,
+				Optional:      true,
+				Deprecated:    "Use cluster instead.",
+				ConflictsWith: []string{"cluster"},
 			},
 
 			"instance_type": {
@@ -59,11 +100,12 @@ func resourceBigtableInstance() *schema.Resource {
 			},
 
 			"storage_type": {
-				Type:         schema.TypeString,
-				Optional:     true,
-				ForceNew:     true,
-				Default:      "SSD",
-				ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false),
+				Type:          schema.TypeString,
+				Optional:      true,
+				Default:       "SSD",
+				ValidateFunc:  validation.StringInSlice([]string{"SSD", "HDD"}, false),
+				Deprecated:    "Use cluster instead.",
+				ConflictsWith: []string{"cluster"},
 			},
 
 			"project": {
@@ -76,6 +118,50 @@ func resourceBigtableInstance() *schema.Resource {
 	}
 }
 
+func resourceBigTableInstanceClusterCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error {
+	if d.Get("cluster_id").(string) == "" && d.Get("cluster.#").(int) == 0 {
+		return fmt.Errorf("At least one cluster must be set.")
+	}
+	if !d.HasChange("cluster_id") && !d.HasChange("zone") && !d.HasChange("num_nodes") &&
+		!d.HasChange("storage_type") && !d.HasChange("cluster") {
+		return nil
+	}
+	if d.Get("cluster.#").(int) == 1 {
+		// if we have exactly one cluster, and it has the same values as the old top-level
+		// values, we can assume the user is trying to go from the deprecated values to the
+		// new values, and we shouldn't ForceNew. We know that the top-level values aren't
+		// set, because they ConflictWith cluster.
+		oldID, _ := d.GetChange("cluster_id")
+		oldNodes, _ := d.GetChange("num_nodes")
+		oldZone, _ := d.GetChange("zone")
+		oldStorageType, _ := d.GetChange("storage_type")
+		new := d.Get("cluster").(*schema.Set).List()[0].(map[string]interface{})
+
+		if oldID.(string) == new["cluster_id"].(string) &&
+			oldNodes.(int) == new["num_nodes"].(int) &&
+			oldZone.(string) == new["zone"].(string) &&
+			oldStorageType.(string) == new["storage_type"].(string) {
+			return nil
+		}
+	}
+	if d.HasChange("cluster_id") {
+		d.ForceNew("cluster_id")
+	}
+	if d.HasChange("cluster") {
+		d.ForceNew("cluster")
+	}
+	if d.HasChange("zone") {
+		d.ForceNew("zone")
+	}
+	if d.HasChange("num_nodes") {
+		d.ForceNew("num_nodes")
+	}
+	if d.HasChange("storage_type") {
+		d.ForceNew("storage_type")
+	}
+	return nil
+}
+
 func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) error {
 	config := meta.(*Config)
 	ctx := context.Background()
@@ -85,46 +171,48 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) er
 		return err
 	}
 
-	name := d.Get("name").(string)
-	displayName, ok := d.GetOk("display_name")
-	if !ok {
-		displayName = name
+	conf := &bigtable.InstanceWithClustersConfig{
+		InstanceID: d.Get("name").(string),
 	}
 
-	var storageType bigtable.StorageType
-	switch value := d.Get("storage_type"); value {
-	case "HDD":
-		storageType = bigtable.HDD
-	case "SSD":
-		storageType = bigtable.SSD
+	displayName, ok := d.GetOk("display_name")
+	if !ok {
+		displayName = conf.InstanceID
 	}
+	conf.DisplayName = displayName.(string)
 
-	numNodes := int32(d.Get("num_nodes").(int))
-	var instanceType bigtable.InstanceType
-	switch value := d.Get("instance_type"); value {
+	switch d.Get("instance_type").(string) {
	case "DEVELOPMENT":
-		instanceType = bigtable.DEVELOPMENT
-
-		if numNodes > 0 {
-			return fmt.Errorf("Can't specify a non-zero number of nodes: %d for DEVELOPMENT Bigtable instance: %s", numNodes, name)
-		}
+		conf.InstanceType = bigtable.DEVELOPMENT
 	case "PRODUCTION":
-		instanceType = bigtable.PRODUCTION
-	}
-
-	zone, err := getZone(d, config)
-	if err != nil {
-		return err
+		conf.InstanceType = bigtable.PRODUCTION
 	}
 
-	instanceConf := &bigtable.InstanceConf{
-		InstanceId:   name,
-		DisplayName:  displayName.(string),
-		ClusterId:    d.Get("cluster_id").(string),
-		NumNodes:     numNodes,
-		InstanceType: instanceType,
-		StorageType:  storageType,
-		Zone:         zone,
+	if d.Get("cluster.#").(int) > 0 {
+		// expand cluster
+		conf.Clusters = expandBigtableClusters(d.Get("cluster").(*schema.Set).List(), conf.InstanceID, config.Zone)
+		if err != nil {
+			return fmt.Errorf("error expanding clusters: %s", err.Error())
+		}
+	} else {
+		// TODO: remove this when we're done with the deprecation period
+		zone, err := getZone(d, config)
+		if err != nil {
+			return err
+		}
+		cluster := bigtable.ClusterConfig{
+			InstanceID: conf.InstanceID,
+			NumNodes:   int32(d.Get("num_nodes").(int)),
+			Zone:       zone,
+			ClusterID:  d.Get("cluster_id").(string),
+		}
+		switch d.Get("storage_type").(string) {
+		case "HDD":
+			cluster.StorageType = bigtable.HDD
+		case "SSD":
+			cluster.StorageType = bigtable.SSD
+		}
+		conf.Clusters = append(conf.Clusters, cluster)
 	}
 
 	c, err := config.bigtableClientFactory.NewInstanceAdminClient(project)
@@ -134,12 +222,12 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) er
 
 	defer c.Close()
 
-	err = c.CreateInstance(ctx, instanceConf)
+	err = c.CreateInstanceWithClusters(ctx, conf)
 	if err != nil {
 		return fmt.Errorf("Error creating instance. %s", err)
 	}
 
-	d.SetId(name)
+	d.SetId(conf.InstanceID)
 
 	return resourceBigtableInstanceRead(d, meta)
 }
@@ -153,11 +241,6 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro
 		return err
 	}
 
-	zone, err := getZone(d, config)
-	if err != nil {
-		return err
-	}
-
 	c, err := config.bigtableClientFactory.NewInstanceAdminClient(project)
 	if err != nil {
 		return fmt.Errorf("Error starting instance admin client. %s", err)
@@ -173,7 +256,37 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro
 	}
 
 	d.Set("project", project)
-	d.Set("zone", zone)
+	if d.Get("cluster.#").(int) > 0 {
+		clusters := d.Get("cluster").(*schema.Set).List()
+		clusterState := []map[string]interface{}{}
+		for _, cl := range clusters {
+			cluster := cl.(map[string]interface{})
+			clus, err := c.GetCluster(ctx, instance.Name, cluster["cluster_id"].(string))
+			if err != nil {
+				if isGoogleApiErrorWithCode(err, 404) {
+					log.Printf("[WARN] Cluster %q not found, not setting it in state", cluster["cluster_id"].(string))
+					continue
+				}
+				return fmt.Errorf("Error retrieving cluster %q: %s", cluster["cluster_id"].(string), err.Error())
+			}
+			clusterState = append(clusterState, flattenBigtableCluster(clus, cluster["storage_type"].(string)))
+		}
+		err = d.Set("cluster", clusterState)
+		if err != nil {
+			return fmt.Errorf("Error setting clusters in state: %s", err.Error())
+		}
+		d.Set("cluster_id", "")
+		d.Set("zone", "")
+		d.Set("num_nodes", 0)
+		d.Set("storage_type", "SSD")
+	} else {
+		// TODO remove this when we're done with our deprecation period
+		zone, err := getZone(d, config)
+		if err != nil {
+			return err
+		}
+		d.Set("zone", zone)
+	}
 	d.Set("name", instance.Name)
 	d.Set("display_name", instance.DisplayName)
 
@@ -206,3 +319,38 @@ func resourceBigtableInstanceDestroy(d *schema.ResourceData, meta interface{}) e
 
 	return nil
 }
+
+func flattenBigtableCluster(c *bigtable.ClusterInfo, storageType string) map[string]interface{} {
+	return map[string]interface{}{
+		"zone":         c.Zone,
+		"num_nodes":    c.ServeNodes,
+		"cluster_id":   c.Name,
+		"storage_type": storageType,
+	}
+}
+
+func expandBigtableClusters(clusters []interface{}, instanceID string, defaultZone string) []bigtable.ClusterConfig {
+	results := make([]bigtable.ClusterConfig, 0, len(clusters))
+	for _, c := range clusters {
+		cluster := c.(map[string]interface{})
+		zone := defaultZone
+		if confZone, ok := cluster["zone"]; ok {
+			zone = confZone.(string)
+		}
+		var storageType bigtable.StorageType
+		switch cluster["storage_type"].(string) {
+		case "SSD":
+			storageType = bigtable.SSD
+		case "HDD":
+			storageType = bigtable.HDD
+		}
+		results = append(results, bigtable.ClusterConfig{
+			InstanceID:  instanceID,
+			Zone:        zone,
+			ClusterID:   cluster["cluster_id"].(string),
+			NumNodes:    int32(cluster["num_nodes"].(int)),
+			StorageType: storageType,
+		})
+	}
+	return results
+}
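A usage note on resourceBigTableInstanceClusterCustomizeDiff above: when an already-applied instance is rewritten from the deprecated top-level fields to a single cluster block carrying identical values, the diff function returns early and nothing is marked ForceNew, so the change should plan as an in-place update rather than a destroy and recreate; any other change to the cluster block or to the deprecated fields is marked ForceNew. A hedged sketch of that migration, assuming the hypothetical instance from the example near the top of this page was previously applied with the deprecated fields:

    # Previously applied with the deprecated fields:
    #   cluster_id = "example-cluster", zone = "us-central1-b", num_nodes = 3, storage_type = "HDD"
    # Rewritten with the same values inside a cluster block; per the CustomizeDiff logic
    # in this commit, this should not force a new instance:
    resource "google_bigtable_instance" "example" {
      name = "example-instance"
      cluster {
        cluster_id   = "example-cluster"
        zone         = "us-central1-b"
        num_nodes    = 3
        storage_type = "HDD"
      }
    }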

google/resource_bigtable_instance_test.go

+12 −8

@@ -107,21 +107,25 @@ func testAccBigtableInstanceExists(n string) resource.TestCheckFunc {
 func testAccBigtableInstance(instanceName string) string {
 	return fmt.Sprintf(`
 resource "google_bigtable_instance" "instance" {
-	name         = "%s"
-	cluster_id   = "%s"
-	zone         = "us-central1-b"
-	num_nodes    = 3
-	storage_type = "HDD"
+	name = "%s"
+	cluster {
+		cluster_id   = "%s"
+		zone         = "us-central1-b"
+		num_nodes    = 3
+		storage_type = "HDD"
+	}
 }
 `, instanceName, instanceName)
 }
 
 func testAccBigtableInstance_development(instanceName string) string {
 	return fmt.Sprintf(`
 resource "google_bigtable_instance" "instance" {
-	name       = "%s"
-	cluster_id = "%s"
-	zone       = "us-central1-b"
+	name = "%s"
+	cluster {
+		cluster_id = "%s"
+		zone       = "us-central1-b"
+	}
 	instance_type = "DEVELOPMENT"
 }
 `, instanceName, instanceName)
