@@ -929,11 +929,17 @@ func flattenHiveJob(job *dataproc.HiveJob) []map[string]interface{} {
 func expandHiveJob(config map[string]interface{}) *dataproc.HiveJob {
     job := &dataproc.HiveJob{}
     if v, ok := config["query_file_uri"]; ok {
-        job.QueryFileUri = v.(string)
+        queryFileUri := v.(string)
+        if len(queryFileUri) != 0 {
+            job.QueryFileUri = v.(string)
+        }
     }
     if v, ok := config["query_list"]; ok {
-        job.QueryList = &dataproc.QueryList{
-            Queries: tpgresource.ConvertStringArr(v.([]interface{})),
+        queryList := v.([]interface{})
+        if len(queryList) != 0 {
+            job.QueryList = &dataproc.QueryList{
+                Queries: tpgresource.ConvertStringArr(queryList),
+            }
         }
     }
     if v, ok := config["continue_on_failure"]; ok {
@@ -1037,11 +1043,17 @@ func flattenPigJob(job *dataproc.PigJob) []map[string]interface{} {
 func expandPigJob(config map[string]interface{}) *dataproc.PigJob {
     job := &dataproc.PigJob{}
     if v, ok := config["query_file_uri"]; ok {
-        job.QueryFileUri = v.(string)
+        queryFileUri := v.(string)
+        if len(queryFileUri) != 0 {
+            job.QueryFileUri = v.(string)
+        }
     }
     if v, ok := config["query_list"]; ok {
-        job.QueryList = &dataproc.QueryList{
-            Queries: tpgresource.ConvertStringArr(v.([]interface{})),
+        queryList := v.([]interface{})
+        if len(queryList) != 0 {
+            job.QueryList = &dataproc.QueryList{
+                Queries: tpgresource.ConvertStringArr(queryList),
+            }
         }
     }
     if v, ok := config["continue_on_failure"]; ok {
@@ -1138,11 +1150,17 @@ func flattenSparkSqlJob(job *dataproc.SparkSqlJob) []map[string]interface{} {
 func expandSparkSqlJob(config map[string]interface{}) *dataproc.SparkSqlJob {
     job := &dataproc.SparkSqlJob{}
     if v, ok := config["query_file_uri"]; ok {
-        job.QueryFileUri = v.(string)
+        queryFileUri := v.(string)
+        if len(queryFileUri) != 0 {
+            job.QueryFileUri = v.(string)
+        }
     }
     if v, ok := config["query_list"]; ok {
-        job.QueryList = &dataproc.QueryList{
-            Queries: tpgresource.ConvertStringArr(v.([]interface{})),
+        queryList := v.([]interface{})
+        if len(queryList) != 0 {
+            job.QueryList = &dataproc.QueryList{
+                Queries: tpgresource.ConvertStringArr(queryList),
+            }
         }
     }
     if v, ok := config["script_variables"]; ok {
@@ -1239,20 +1257,26 @@ func flattenPrestoJob(job *dataproc.PrestoJob) []map[string]interface{} {

 func expandPrestoJob(config map[string]interface{}) *dataproc.PrestoJob {
     job := &dataproc.PrestoJob{}
+    if v, ok := config["query_file_uri"]; ok {
+        queryFileUri := v.(string)
+        if len(queryFileUri) != 0 {
+            job.QueryFileUri = v.(string)
+        }
+    }
+    if v, ok := config["query_list"]; ok {
+        queryList := v.([]interface{})
+        if len(queryList) != 0 {
+            job.QueryList = &dataproc.QueryList{
+                Queries: tpgresource.ConvertStringArr(queryList),
+            }
+        }
+    }
     if v, ok := config["client_tags"]; ok {
         job.ClientTags = tpgresource.ConvertStringArr(v.([]interface{}))
     }
     if v, ok := config["continue_on_failure"]; ok {
         job.ContinueOnFailure = v.(bool)
     }
-    if v, ok := config["query_file_uri"]; ok {
-        job.QueryFileUri = v.(string)
-    }
-    if v, ok := config["query_list"]; ok {
-        job.QueryList = &dataproc.QueryList{
-            Queries: tpgresource.ConvertStringArr(v.([]interface{})),
-        }
-    }
     if v, ok := config["properties"]; ok {
         job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{}))
     }
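All four hunks apply the same guard: `query_file_uri` and `query_list` are two alternative ways to specify the query, and the unset one still appears in the config map as its zero value, so the old code copied an empty string or empty list into the API request. Below is a minimal, self-contained sketch of the pattern, assuming only the standard library; `QueryJob` and `expandQueryJob` are hypothetical stand-ins for the generated `dataproc` types and the provider's expand functions, and the inline string conversion stands in for `tpgresource.ConvertStringArr`.

package main

import "fmt"

// QueryJob is a hypothetical stand-in for the generated API types
// (dataproc.HiveJob, dataproc.PigJob, dataproc.SparkSqlJob, dataproc.PrestoJob).
type QueryJob struct {
    QueryFileUri string
    Queries      []string
}

// expandQueryJob mirrors the guarded pattern from the diff: both keys are
// present in the config map, but only a non-zero value is copied onto the
// job, so an empty conflicting field never reaches the request.
func expandQueryJob(config map[string]interface{}) *QueryJob {
    job := &QueryJob{}
    if v, ok := config["query_file_uri"]; ok {
        queryFileUri := v.(string)
        if len(queryFileUri) != 0 {
            job.QueryFileUri = queryFileUri
        }
    }
    if v, ok := config["query_list"]; ok {
        queryList := v.([]interface{})
        if len(queryList) != 0 {
            for _, q := range queryList {
                job.Queries = append(job.Queries, q.(string))
            }
        }
    }
    return job
}

func main() {
    // Terraform leaves the unset member of the pair as its zero value.
    config := map[string]interface{}{
        "query_file_uri": "",
        "query_list":     []interface{}{"SHOW DATABASES;", "SHOW TABLES;"},
    }
    job := expandQueryJob(config)
    fmt.Printf("uri=%q queries=%v\n", job.QueryFileUri, job.Queries)
    // Output: uri="" queries=[SHOW DATABASES; SHOW TABLES;]
}

With the guard in place, only the populated field is set on the job; without it, the request carries both an empty `QueryFileUri` and a `QueryList` (or vice versa), which the API can reject as conflicting, presumably the behavior this change fixes.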