big_query_routine_pyspark.tf.erb
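
This magic-modules example template provisions a BigQuery dataset, a Spark-enabled BigQuery connection, and a PySpark stored-procedure routine; the <%= ctx[...] %> placeholders are rendered by the example generator.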
resource "google_bigquery_dataset" "test" {
dataset_id = "<%= ctx[:vars]['dataset_id'] %>"
}
resource "google_bigquery_connection" "test" {
connection_id = "<%= ctx[:vars]['connection_id'] %>"
location = "US"
spark { }
}
resource "google_bigquery_routine" "<%= ctx[:primary_resource_id] %>" {
dataset_id = google_bigquery_dataset.test.dataset_id
routine_id = "<%= ctx[:vars]['routine_id'] %>"
routine_type = "PROCEDURE"
language = "PYTHON"
definition_body = <<-EOS
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("spark-bigquery-demo").getOrCreate()
# Load data from BigQuery.
words = spark.read.format("bigquery") \
.option("table", "bigquery-public-data:samples.shakespeare") \
.load()
words.createOrReplaceTempView("words")
# Perform word count.
word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed("sum(word_count)", "sum_word_count")
word_count.show()
word_count.printSchema()
# Saving the data to BigQuery
word_count.write.format("bigquery") \
.option("writeMethod", "direct") \
.save("wordcount_dataset.wordcount_output")
EOS
spark_options {
connection = google_bigquery_connection.test.name
runtime_version = "2.1"
}
}
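
Once applied, the stored procedure can be invoked with a BigQuery CALL statement. Below is a minimal sketch using the provider's google_bigquery_job resource; it is not part of the original template, the job_id is a hypothetical placeholder, and the empty create/write dispositions follow the provider's convention for DML and scripting queries:

resource "google_bigquery_job" "call_routine" {
  job_id = "call-pyspark-routine" # hypothetical job ID, for illustration only

  query {
    # Invoke the stored procedure defined above.
    query = "CALL `${google_bigquery_dataset.test.dataset_id}.${google_bigquery_routine.<%= ctx[:primary_resource_id] %>.routine_id}`()"

    # Scripting statements such as CALL cannot target a destination table,
    # so the dispositions are left empty (assumed here, per the provider's
    # documented handling of DML/script queries).
    create_disposition = ""
    write_disposition  = ""
  }
}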