
Commit 4e396ab

modular-magician authored and rileykarson committed
Add support for Google Cloud Bigtable replication (#2313)
<!-- This change is generated by MagicModules. --> /cc @rileykarson
1 parent fa6c940 commit 4e396ab
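
In user-facing terms, this commit reworks the google_bigtable_instance schema so that clusters are declared only through the repeated cluster block, which becomes Required and ForceNew and accepts up to two clusters (a second cluster is what provides replication). The old top-level cluster_id, zone, num_nodes, and storage_type fields are flagged as Removed, and the CustomizeDiff-based deprecation shim is dropped. Below is a minimal sketch of the reworked block, reconstructed from the added lines in the diff; the helper name and the inner num_nodes/storage_type attributes are assumptions, not provider code.

package google

import (
  "github.com/hashicorp/terraform/helper/schema"
)

// bigtableClusterSchemaSketch is a hypothetical helper, shown only to
// illustrate the shape of the reworked "cluster" block; the real provider
// declares this inline in resourceBigtableInstance.
func bigtableClusterSchemaSketch() *schema.Schema {
  return &schema.Schema{
    Type:     schema.TypeSet,
    Required: true,
    ForceNew: true,
    MaxItems: 2, // a second cluster is what enables replication
    Elem: &schema.Resource{
      Schema: map[string]*schema.Schema{
        "cluster_id": {Type: schema.TypeString, Required: true},
        "zone":       {Type: schema.TypeString, Required: true},
        // num_nodes and storage_type keep their existing definitions in the
        // resource; only their presence and types are shown here.
        "num_nodes":    {Type: schema.TypeInt, Optional: true},
        "storage_type": {Type: schema.TypeString, Optional: true},
      },
    },
  }
}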

2 files changed: +102 additions, −152 deletions

google/resource_bigtable_instance.go (+57 −148)
@@ -5,7 +5,6 @@ import (
   "fmt"
   "log"

-  "github.com/hashicorp/terraform/helper/customdiff"
   "github.com/hashicorp/terraform/helper/schema"
   "github.com/hashicorp/terraform/helper/validation"

@@ -16,13 +15,7 @@ func resourceBigtableInstance() *schema.Resource {
   return &schema.Resource{
     Create: resourceBigtableInstanceCreate,
     Read:   resourceBigtableInstanceRead,
-    // TODO: Update is only needed because we're doing forcenew in customizediff
-    // when we're done with the deprecation, we can drop customizediff and make cluster forcenew
-    Update: schema.Noop,
     Delete: resourceBigtableInstanceDestroy,
-    CustomizeDiff: customdiff.All(
-      resourceBigTableInstanceClusterCustomizeDiff,
-    ),

     Schema: map[string]*schema.Schema{
       "name": {
@@ -31,28 +24,20 @@
         ForceNew: true,
       },

-      "cluster_id": {
-        Type:          schema.TypeString,
-        Optional:      true,
-        Deprecated:    "Use cluster instead.",
-        ConflictsWith: []string{"cluster"},
-      },
-
       "cluster": {
-        Type:          schema.TypeSet,
-        Optional:      true,
-        MaxItems:      1,
-        ConflictsWith: []string{"cluster_id", "zone", "num_nodes", "storage_type"},
+        Type:     schema.TypeSet,
+        Required: true,
+        ForceNew: true,
+        MaxItems: 2,
         Elem: &schema.Resource{
           Schema: map[string]*schema.Schema{
             "cluster_id": {
               Type:     schema.TypeString,
-              Optional: true,
+              Required: true,
             },
             "zone": {
               Type:     schema.TypeString,
-              Optional: true,
-              Computed: true,
+              Required: true,
             },
             "num_nodes": {
               Type:     schema.TypeInt,
@@ -68,29 +53,13 @@ func resourceBigtableInstance() *schema.Resource {
         },
       },

-      "zone": {
-        Type:          schema.TypeString,
-        Optional:      true,
-        Computed:      true,
-        ForceNew:      true,
-        Deprecated:    "Use cluster instead.",
-        ConflictsWith: []string{"cluster"},
-      },
-
       "display_name": {
         Type:     schema.TypeString,
         Optional: true,
         ForceNew: true,
         Computed: true,
       },

-      "num_nodes": {
-        Type:          schema.TypeInt,
-        Optional:      true,
-        Deprecated:    "Use cluster instead.",
-        ConflictsWith: []string{"cluster"},
-      },
-
       "instance_type": {
         Type:     schema.TypeString,
         Optional: true,
@@ -99,67 +68,42 @@
         ValidateFunc: validation.StringInSlice([]string{"DEVELOPMENT", "PRODUCTION"}, false),
       },

-      "storage_type": {
-        Type:          schema.TypeString,
-        Optional:      true,
-        Default:       "SSD",
-        ValidateFunc:  validation.StringInSlice([]string{"SSD", "HDD"}, false),
-        Deprecated:    "Use cluster instead.",
-        ConflictsWith: []string{"cluster"},
-      },
-
       "project": {
         Type:     schema.TypeString,
         Optional: true,
         Computed: true,
         ForceNew: true,
       },
-    },
-  }
-}

-func resourceBigTableInstanceClusterCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error {
-  if d.Get("cluster_id").(string) == "" && d.Get("cluster.#").(int) == 0 {
-    return fmt.Errorf("At least one cluster must be set.")
-  }
-  if !d.HasChange("cluster_id") && !d.HasChange("zone") && !d.HasChange("num_nodes") &&
-    !d.HasChange("storage_type") && !d.HasChange("cluster") {
-    return nil
-  }
-  if d.Get("cluster.#").(int) == 1 {
-    // if we have exactly one cluster, and it has the same values as the old top-level
-    // values, we can assume the user is trying to go from the deprecated values to the
-    // new values, and we shouldn't ForceNew. We know that the top-level values aren't
-    // set, because they ConflictWith cluster.
-    oldID, _ := d.GetChange("cluster_id")
-    oldNodes, _ := d.GetChange("num_nodes")
-    oldZone, _ := d.GetChange("zone")
-    oldStorageType, _ := d.GetChange("storage_type")
-    new := d.Get("cluster").(*schema.Set).List()[0].(map[string]interface{})
-
-    if oldID.(string) == new["cluster_id"].(string) &&
-      oldNodes.(int) == new["num_nodes"].(int) &&
-      oldZone.(string) == new["zone"].(string) &&
-      oldStorageType.(string) == new["storage_type"].(string) {
-      return nil
-    }
-  }
-  if d.HasChange("cluster_id") {
-    d.ForceNew("cluster_id")
-  }
-  if d.HasChange("cluster") {
-    d.ForceNew("cluster")
-  }
-  if d.HasChange("zone") {
-    d.ForceNew("zone")
-  }
-  if d.HasChange("num_nodes") {
-    d.ForceNew("num_nodes")
-  }
-  if d.HasChange("storage_type") {
-    d.ForceNew("storage_type")
+      "cluster_id": {
+        Type:     schema.TypeString,
+        Optional: true,
+        Computed: true,
+        Removed:  "Use cluster instead.",
+      },
+
+      "zone": {
+        Type:     schema.TypeString,
+        Optional: true,
+        Computed: true,
+        Removed:  "Use cluster instead.",
+      },
+
+      "num_nodes": {
+        Type:     schema.TypeInt,
+        Optional: true,
+        Computed: true,
+        Removed:  "Use cluster instead.",
+      },
+
+      "storage_type": {
+        Type:     schema.TypeString,
+        Optional: true,
+        Computed: true,
+        Removed:  "Use cluster instead.",
+      },
+    },
   }
-  return nil
 }

 func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) error {
@@ -188,31 +132,9 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) er
     conf.InstanceType = bigtable.PRODUCTION
   }

-  if d.Get("cluster.#").(int) > 0 {
-    // expand cluster
-    conf.Clusters = expandBigtableClusters(d.Get("cluster").(*schema.Set).List(), conf.InstanceID, config.Zone)
-    if err != nil {
-      return fmt.Errorf("error expanding clusters: %s", err.Error())
-    }
-  } else {
-    // TODO: remove this when we're done with the deprecation period
-    zone, err := getZone(d, config)
-    if err != nil {
-      return err
-    }
-    cluster := bigtable.ClusterConfig{
-      InstanceID: conf.InstanceID,
-      NumNodes:   int32(d.Get("num_nodes").(int)),
-      Zone:       zone,
-      ClusterID:  d.Get("cluster_id").(string),
-    }
-    switch d.Get("storage_type").(string) {
-    case "HDD":
-      cluster.StorageType = bigtable.HDD
-    case "SSD":
-      cluster.StorageType = bigtable.SSD
-    }
-    conf.Clusters = append(conf.Clusters, cluster)
+  conf.Clusters = expandBigtableClusters(d.Get("cluster").(*schema.Set).List(), conf.InstanceID)
+  if err != nil {
+    return fmt.Errorf("error expanding clusters: %s", err.Error())
   }

   c, err := config.bigtableClientFactory.NewInstanceAdminClient(project)
@@ -256,37 +178,27 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro
   }

   d.Set("project", project)
-  if d.Get("cluster.#").(int) > 0 {
-    clusters := d.Get("cluster").(*schema.Set).List()
-    clusterState := []map[string]interface{}{}
-    for _, cl := range clusters {
-      cluster := cl.(map[string]interface{})
-      clus, err := c.GetCluster(ctx, instance.Name, cluster["cluster_id"].(string))
-      if err != nil {
-        if isGoogleApiErrorWithCode(err, 404) {
-          log.Printf("[WARN] Cluster %q not found, not setting it in state", cluster["cluster_id"].(string))
-          continue
-        }
-        return fmt.Errorf("Error retrieving cluster %q: %s", cluster["cluster_id"].(string), err.Error())
-      }
-      clusterState = append(clusterState, flattenBigtableCluster(clus, cluster["storage_type"].(string)))
-    }
-    err = d.Set("cluster", clusterState)
-    if err != nil {
-      return fmt.Errorf("Error setting clusters in state: %s", err.Error())
-    }
-    d.Set("cluster_id", "")
-    d.Set("zone", "")
-    d.Set("num_nodes", 0)
-    d.Set("storage_type", "SSD")
-  } else {
-    // TODO remove this when we're done with our deprecation period
-    zone, err := getZone(d, config)
+
+  clusters := d.Get("cluster").(*schema.Set).List()
+  clusterState := []map[string]interface{}{}
+  for _, cl := range clusters {
+    cluster := cl.(map[string]interface{})
+    clus, err := c.GetCluster(ctx, instance.Name, cluster["cluster_id"].(string))
     if err != nil {
-      return err
+      if isGoogleApiErrorWithCode(err, 404) {
+        log.Printf("[WARN] Cluster %q not found, not setting it in state", cluster["cluster_id"].(string))
+        continue
+      }
+      return fmt.Errorf("Error retrieving cluster %q: %s", cluster["cluster_id"].(string), err.Error())
     }
-    d.Set("zone", zone)
+    clusterState = append(clusterState, flattenBigtableCluster(clus, cluster["storage_type"].(string)))
+  }
+
+  err = d.Set("cluster", clusterState)
+  if err != nil {
+    return fmt.Errorf("Error setting clusters in state: %s", err.Error())
   }
+
   d.Set("name", instance.Name)
   d.Set("display_name", instance.DisplayName)

@@ -329,14 +241,11 @@ func flattenBigtableCluster(c *bigtable.ClusterInfo, storageType string) map[str
   }
 }

-func expandBigtableClusters(clusters []interface{}, instanceID string, defaultZone string) []bigtable.ClusterConfig {
+func expandBigtableClusters(clusters []interface{}, instanceID string) []bigtable.ClusterConfig {
   results := make([]bigtable.ClusterConfig, 0, len(clusters))
   for _, c := range clusters {
     cluster := c.(map[string]interface{})
-    zone := defaultZone
-    if confZone, ok := cluster["zone"]; ok {
-      zone = confZone.(string)
-    }
+    zone := cluster["zone"].(string)
     var storageType bigtable.StorageType
     switch cluster["storage_type"].(string) {
     case "SSD":
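
Tying the hunks above together, here is a hedged sketch of expandBigtableClusters as it reads after this change. Only the new signature, the zone lookup, and the start of the storage_type switch are visible in the diff; the HDD case, the ClusterConfig construction, and the final append/return are assumptions inferred from the code removed from resourceBigtableInstanceCreate.

package google

import (
  "cloud.google.com/go/bigtable"
)

// Illustrative reconstruction, not a verbatim copy of the provider source.
func expandBigtableClusters(clusters []interface{}, instanceID string) []bigtable.ClusterConfig {
  results := make([]bigtable.ClusterConfig, 0, len(clusters))
  for _, c := range clusters {
    cluster := c.(map[string]interface{})
    zone := cluster["zone"].(string)

    // Assumed to mirror the switch that previously lived in the create path.
    var storageType bigtable.StorageType
    switch cluster["storage_type"].(string) {
    case "SSD":
      storageType = bigtable.SSD
    case "HDD":
      storageType = bigtable.HDD
    }

    results = append(results, bigtable.ClusterConfig{
      InstanceID:  instanceID,
      ClusterID:   cluster["cluster_id"].(string),
      Zone:        zone,
      NumNodes:    int32(cluster["num_nodes"].(int)),
      StorageType: storageType,
    })
  }
  return results
}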

google/resource_bigtable_instance_test.go (+45 −4)
@@ -31,6 +31,27 @@ func TestAccBigtableInstance_basic(t *testing.T) {
   })
 }

+func TestAccBigtableInstance_cluster(t *testing.T) {
+  t.Parallel()
+
+  instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckBigtableInstanceDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccBigtableInstance_cluster(instanceName),
+        Check: resource.ComposeTestCheckFunc(
+          testAccBigtableInstanceExists(
+            "google_bigtable_instance.instance"),
+        ),
+      },
+    },
+  })
+}
+
 func TestAccBigtableInstance_development(t *testing.T) {
   t.Parallel()

@@ -65,12 +86,12 @@ func testAccCheckBigtableInstanceDestroy(s *terraform.State) error {
       return fmt.Errorf("Error starting instance admin client. %s", err)
     }

+    defer c.Close()
+
     _, err = c.InstanceInfo(ctx, rs.Primary.Attributes["name"])
     if err == nil {
       return fmt.Errorf("Instance %s still exists.", rs.Primary.Attributes["name"])
     }
-
-    c.Close()
   }

   return nil
@@ -93,13 +114,13 @@ func testAccBigtableInstanceExists(n string) resource.TestCheckFunc {
       return fmt.Errorf("Error starting instance admin client. %s", err)
     }

+    defer c.Close()
+
     _, err = c.InstanceInfo(ctx, rs.Primary.Attributes["name"])
     if err != nil {
       return fmt.Errorf("Error retrieving instance %s.", rs.Primary.Attributes["name"])
     }

-    c.Close()
-
     return nil
   }
 }
@@ -118,6 +139,26 @@ resource "google_bigtable_instance" "instance" {
 `, instanceName, instanceName)
 }

+func testAccBigtableInstance_cluster(instanceName string) string {
+  return fmt.Sprintf(`
+resource "google_bigtable_instance" "instance" {
+  name = "%s"
+  cluster {
+    cluster_id   = "%s-a"
+    zone         = "us-central1-b"
+    num_nodes    = 3
+    storage_type = "HDD"
+  }
+  cluster {
+    cluster_id   = "%s-b"
+    zone         = "us-central1-c"
+    num_nodes    = 3
+    storage_type = "HDD"
+  }
+}
+`, instanceName, instanceName, instanceName)
+}
+
 func testAccBigtableInstance_development(instanceName string) string {
   return fmt.Sprintf(`
 resource "google_bigtable_instance" "instance" {
