
Commit b20cb1e

# Add: biglake_configuration to bigquery_table (#11724)
[upstream:6555ae7f20fd7543412c9292e31bba01c7c4bf4c] Signed-off-by: Modular Magician <[email protected]>
1 parent 02c4fb0 commit b20cb1e

File tree

4 files changed: +128 -0 lines changed

.changelog/11724.txt
google-beta/services/bigquery/resource_bigquery_table.go
google-beta/services/bigquery/resource_bigquery_table_test.go
website/docs/r/bigquery_table.html.markdown


.changelog/11724.txt

+3
@@ -0,0 +1,3 @@
+```release-note:enhancement
+bigquery: added `biglake_configuration` to `google_bigquery_table` to support BigLake Managed Tables
+```

google-beta/services/bigquery/resource_bigquery_table.go

+47
@@ -892,6 +892,53 @@ func ResourceBigQueryTable() *schema.Resource {
 				},
 			},
 
+			// BiglakeConfiguration [Optional] Specifies the configuration of a BigLake managed table.
+			"biglake_configuration": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				MaxItems:    1,
+				ForceNew:    true,
+				Description: "Specifies the configuration of a BigLake managed table.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						// ConnectionId: [Required] The connection specifying the credentials to be used to read
+						// and write to external storage, such as Cloud Storage. The connection_id can have the
+						// form "<project_id>.<location_id>.<connection_id>" or
+						// "projects/<project_id>/locations/<location_id>/connections/<connection_id>".
+						"connection_id": {
+							Type:             schema.TypeString,
+							Required:         true,
+							DiffSuppressFunc: bigQueryTableConnectionIdSuppress,
+							ForceNew:         true,
+							Description:      `The connection specifying the credentials to be used to read and write to external storage, such as Cloud Storage. The connection_id can have the form "<project_id>.<location_id>.<connection_id>" or "projects/<project_id>/locations/<location_id>/connections/<connection_id>".`,
+						},
+						// StorageUri: [Required] The fully qualified location prefix of the external folder where
+						// table data is stored. The '*' wildcard character is not allowed.
+						// The URI should be in the format "gs://bucket/path_to_table/"
+						"storage_uri": {
+							Type:        schema.TypeString,
+							Required:    true,
+							ForceNew:    true,
+							Description: `The fully qualified location prefix of the external folder where table data is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/"`,
+						},
+						// FileFormat: [Required] The file format the data is stored in.
+						"file_format": {
+							Type:        schema.TypeString,
+							Required:    true,
+							ForceNew:    true,
+							Description: "The file format the data is stored in.",
+						},
+						// TableFormat: [Required]
+						"table_format": {
+							Type:        schema.TypeString,
+							Required:    true,
+							ForceNew:    true,
+							Description: "The table format the metadata only snapshots are stored in.",
+						},
+					},
+				},
+			},
+
 			// FriendlyName: [Optional] A descriptive name for this table.
 			"friendly_name": {
 				Type: schema.TypeString,
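Two details of the new schema are worth noting: every attribute in `biglake_configuration` is marked `ForceNew`, so changing any of them replaces the table rather than updating it in place, and `connection_id` reuses the provider's existing `bigQueryTableConnectionIdSuppress` helper, which appears intended to keep the short and fully qualified connection ID spellings from surfacing as a permanent diff. A minimal sketch of what that means in practice (the project, location, connection, and bucket names below are placeholders, not values from this change):

```hcl
biglake_configuration {
  # Short form: "<project_id>.<location_id>.<connection_id>"
  connection_id = "my-project.us.my-connection"

  # The API may return the fully qualified form instead; the
  # DiffSuppressFunc should treat the two spellings as equivalent:
  # connection_id = "projects/my-project/locations/us/connections/my-connection"

  storage_uri  = "gs://my-bucket/data/"
  file_format  = "PARQUET"
  table_format = "ICEBERG"
}
```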

google-beta/services/bigquery/resource_bigquery_table_test.go

+62
@@ -281,6 +281,68 @@ func TestAccBigQueryTable_AvroPartitioning(t *testing.T) {
 	})
 }
 
+func TestAccBigQueryBigLakeManagedTable(t *testing.T) {
+	t.Parallel()
+	bucketName := acctest.TestBucketName(t)
+	connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10))
+
+	datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10))
+	tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10))
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy:             testAccCheckBigQueryTableDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccBigLakeManagedTable(bucketName, connectionID, datasetID, tableID, TEST_SIMPLE_CSV_SCHEMA),
+			},
+		},
+	})
+}
+
+func testAccBigLakeManagedTable(bucketName, connectionID, datasetID, tableID, schema string) string {
+	return fmt.Sprintf(`
+data "google_project" "project" {}
+resource "google_storage_bucket" "test" {
+  name                        = "%s"
+  location                    = "US"
+  force_destroy               = true
+  uniform_bucket_level_access = true
+}
+resource "google_bigquery_connection" "test" {
+  connection_id = "%s"
+  location      = "US"
+  cloud_resource {}
+}
+resource "google_project_iam_member" "test" {
+  role    = "roles/storage.objectViewer"
+  project = data.google_project.project.id
+  member  = "serviceAccount:${google_bigquery_connection.test.cloud_resource[0].service_account_id}"
+}
+resource "google_bigquery_dataset" "test" {
+  dataset_id = "%s"
+}
+resource "google_bigquery_table" "test" {
+  deletion_protection = false
+  table_id            = "%s"
+  dataset_id          = google_bigquery_dataset.test.dataset_id
+  biglake_configuration {
+    connection_id = google_bigquery_connection.test.name
+    storage_uri   = "gs://${google_storage_bucket.test.name}/data/"
+    file_format   = "PARQUET"
+    table_format  = "ICEBERG"
+  }
+
+  schema = jsonencode(%s)
+
+  depends_on = [
+    google_project_iam_member.test
+  ]
+}
+`, bucketName, connectionID, datasetID, tableID, schema)
+}
+
 func TestAccBigQueryExternalDataTable_json(t *testing.T) {
 	t.Parallel()
 	bucketName := acctest.TestBucketName(t)

website/docs/r/bigquery_table.html.markdown

+16
@@ -104,6 +104,8 @@ The following arguments are supported:
   By defining these properties, the data source can then be queried as
   if it were a standard BigQuery table. Structure is [documented below](#nested_external_data_configuration).
 
+* `biglake_configuration` - (Optional) Specifies the configuration of a BigLake managed table. Structure is [documented below](#nested_biglake_configuration).
+
 * `friendly_name` - (Optional) A descriptive name for the table.
 
 * `max_staleness`: (Optional) The maximum staleness of data that could be
@@ -492,6 +494,20 @@ The following arguments are supported:
 * `replication_interval_ms` (Optional) - The interval at which the source
   materialized view is polled for updates. The default is 300000.
 
+<a name="nested_biglake_configuration"></a>The `biglake_configuration` block supports:
+
+* `connection_id` - (Required) The connection specifying the credentials to be used to
+  read and write to external storage, such as Cloud Storage. The connection_id can
+  have the form "<project_id>.<location_id>.<connection_id>" or
+  "projects/<project_id>/locations/<location_id>/connections/<connection_id>".
+
+* `storage_uri` - (Required) The fully qualified location prefix of the external folder where table data
+  is stored. The '*' wildcard character is not allowed. The URI should be in the format "gs://bucket/path_to_table/".
+
+* `file_format` - (Required) The file format the table data is stored in.
+
+* `table_format` - (Required) The table format the metadata only snapshots are stored in.
+
 ## Attributes Reference
 
 In addition to the arguments listed above, the following computed attributes are
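Putting the new arguments together, a minimal BigLake managed table configuration might look like the sketch below. It assumes a dataset, a Cloud Storage bucket, and a BigQuery cloud-resource connection (whose service account already has access to the bucket) defined elsewhere; all resource names are placeholders rather than values from this change.

```hcl
resource "google_bigquery_table" "biglake" {
  dataset_id          = google_bigquery_dataset.default.dataset_id
  table_id            = "biglake_table"
  deletion_protection = false

  # BigLake managed table backed by the connection and bucket above.
  biglake_configuration {
    connection_id = google_bigquery_connection.default.name
    storage_uri   = "gs://${google_storage_bucket.default.name}/data/"
    file_format   = "PARQUET"
    table_format  = "ICEBERG"
  }

  schema = jsonencode([
    { name = "id", type = "INTEGER" },
    { name = "name", type = "STRING" },
  ])
}
```

As the acceptance test above does with `roles/storage.objectViewer`, the connection's service account needs a Cloud Storage grant on the bucket before the table can be created.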
