# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
resources:
# A list of GCP resources that are unique and specific to your pipeline.
#
# The currently supported resources are shown below. Use only the resources
# needed by your pipeline, and delete the rest of the examples.
#
# We will keep adding to the list below to support more Google Cloud resources
# over time. If a resource you need isn't supported, please file an issue on
# the repository.
- type: bigquery_table
# A Google BigQuery table to store your data. Requires a `bigquery_dataset`
# to be specified in the config (i.e. `dataset.yaml`) for the dataset that
# this pipeline belongs in.
#
# Required Properties:
# table_id
table_id: PIPELINE_FOLDER_NAME
# Optional Properties:
# Description of the table
description: "This is a table description."
# Time-based partitioning configuration. There is no need for this property
# if you have a relatively small dataset to host on a BigQuery table.
time_partitioning:
# The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.
type: "DAY"
# If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.
require_partition_filter: false
# Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
clustering:
- "column_1"
- "column_2"
- "column_3"
# The table cannot be deleted without first disabling this property.
# Unless this field is set to false in Terraform state, a `terraform destroy`
# or `terraform apply` that would delete the table will fail.
deletion_protection: true
dag:
# [Required] Specify the Airflow version of the operators used by the DAG.
airflow_version: 2
# The DAG acronym stands for directed acyclic graph. This block represents
# your data pipeline along with every property and configuration it needs to
# onboard your data.
initialize:
dag_id: PIPELINE_FOLDER_NAME
default_args:
owner: "Google"
# When set to True, keeps a task from getting triggered if the previous schedule for the task hasn’t succeeded
depends_on_past: False
start_date: "2021-03-01"
max_active_runs: 1
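# How often the DAG runs. Besides "@once", this accepts cron expressions
# (e.g. "0 2 * * *" for 02:00 UTC daily) and Airflow presets such as "@daily".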
schedule_interval: "@once"
catchup: False
default_view: graph
tasks:
# This is where you specify the tasks (a.k.a. processes) that your data
# pipeline will run to onboard the data.
#
# As the examples below will show, every task must be represented by an
# Airflow operator. The list of supported operators can be found in
#
# scripts/dag_imports.json
#
# If an operator you need isn't supported, please file an issue on the
# repository.
#
# Use the YAML list syntax in this block to specify every task for your
# pipeline.
- operator: "BashOperator"
# Initializes an Airflow BashOperator for the DAG. This operator can be
# used to
# - Download from HTTP sources
# - Run custom Python scripts
# - Run processes using specific packages that support CLI commands
# Task description
description: "Run a custom Python script"
args:
# Arguments supported by this operator:
# https://airflow.apache.org/docs/apache-airflow/stable/howto/operator/bash.html
task_id: "sample_bash_task"
bash_command: |
mkdir -p $airflow_home/data/$dataset/$pipeline/run_date={{ ds }}
CUSTOM_ENV_VAR=$custom_env_var python $airflow_home/dags/$dataset/$pipeline/custom/some_script.py
env:
airflow_home: "{{ var.value.airflow_home }}"
dataset: DATASET_FOLDER_NAME
pipeline: PIPELINE_FOLDER_NAME
custom_env_var: "some value that your custom script needs"
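# For reference only, a minimal sketch (not part of this sample) of what the
# custom script invoked by the BashOperator above might contain. The filename
# `some_script.py` and the env var are just the placeholders used here; your
# actual script will differ:
#
#   import os
#
#   # Injected via `CUSTOM_ENV_VAR=$custom_env_var python ...` in bash_command
#   custom_value = os.environ.get("CUSTOM_ENV_VAR", "")
#   print(f"Running custom script with CUSTOM_ENV_VAR={custom_value}")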
- operator: "GoogleCloudStorageToBigQueryOperator"
# Initializes a GCS-to-BigQuery task for the DAG. This operator is used to load
# JSON, CSV, Avro, ORC, or Parquet data from GCS into a BigQuery table.
# Task description
description: "Task to load CSV data to a BigQuery table"
# Arguments supported by this operator:
# http://airflow.apache.org/docs/apache-airflow/stable/howto/operator/gcp/gcs.html#googlecloudstoragetobigqueryoperator
args:
task_id: "sample_gcs_to_bq_task"
# The GCS bucket where the CSV file is located.
bucket: "{{ var.value.composer_bucket }}"
# The GCS object path for the CSV file
source_objects:
[
"data/DATASET_FOLDER_NAME/PIPELINE_FOLDER_NAME/run_date={{ ds }}/data.csv",
]
source_format: "CSV"
destination_project_dataset_table: "DATASET_FOLDER_NAME.PIPELINE_FOLDER_NAME"
# Use this if your CSV file contains a header row
skip_leading_rows: 1
# How to write data to the table: overwrite, append, or write if empty
# See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/WriteDisposition
write_disposition: "WRITE_TRUNCATE"
# The BigQuery table schema based on the CSV file. For more info, see
# https://cloud.google.com/bigquery/docs/schemas.
# Always use snake_case and lowercase for column names, and be explicit,
# i.e. specify modes for all columns.
schema_fields:
- name: "name"
type: "STRING"
mode: "REQUIRED"
- name: "string_col"
type: "STRING"
mode: "NULLABLE"
- name: "date"
type: "DATE"
mode: "REQUIRED"
- name: "num_col"
type: "INTEGER"
mode: "NULLABLE"
# Copies objects from one bucket to another using the Google Cloud Storage Transfer Service.
# This operator does not perform the copy locally, but uses Google Cloud resources, which allows it to perform this task faster and more economically.
#
# Warning: This operator is NOT idempotent. If you run it many times, many transfer jobs will be created in the Google Cloud.
- operator: "CloudDataTransferServiceGCSToGCSOperator"
# Task description
description: "Task to run a GCS to GCS operation using the Cloud Data Transfer Service"
# Arguments supported by this operator:
# https://airflow.apache.org/docs/apache-airflow-providers-google/stable/_api/airflow/providers/google/cloud/operators/cloud_storage_transfer_service/index.html#airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceGCSToGCSOperator
args:
task_id: "sample_data_transfer_service_gcs_to_gcs"
project_id: "{{ var.value.gcp_project }}"
# The GCS bucket to copy from
source_bucket: "sample-source-bucket"
# The GCS bucket to copy to
destination_bucket: "sample-destination-bucket"
# The service account to use, which must have been granted read access to the source bucket
google_impersonation_chain: "[email protected]"
# Initializes a GCS-to-GCS task for the DAG. This operator is used to copy or move
# GCS objects from one location to another.
- operator: "GoogleCloudStorageToGoogleCloudStorageOperator"
# Task description
description: "Task to run a GoogleCloudStorageToGoogleCloudStorageOperator"
args:
# Arguments supported by this operator:
# https://airflow.apache.org/docs/apache-airflow-providers-google/stable/_api/airflow/providers/google/cloud/transfers/gcs_to_gcs/index.html
task_id: "sample_gcs_to_gcs_task"
# The GCS bucket to copy the object/s from
source_bucket: "{{ var.value.composer_bucket }}"
# Use a trailing "/*" if you want to copy all objects under that path.
source_object: "data/DATASET_FOLDER_NAME/PIPELINE_FOLDER_NAME/run_date={{ ds }}/*"
# Optionally, you can supply a list of objects to specifically copy
source_objects:
- "path/to/object"
- "path/to/another/object"
# The GCS bucket to copy the object/s to
destination_bucket: "{{ var.json.DATASET_FOLDER_NAME.destination_bucket }}"
# The GCS prefix to copy the object/s to
destination_object: "datasets/DATASET_FOLDER_NAME/PIPELINE_FOLDER_NAME/run_date={{ ds }}/"
# Use this argument if you don't want to keep the source object/s.
move_object: True
# Whether you want to replace existing destination files or not.
replace: False
# [Optional] Restricts the operation to source objects whose names end with this delimiter.
# For example, if source_objects = ['foo/bah/'] and delimiter = '.avro', then only the .avro
# files will be copied to the destination folder.
delimiter: ".csv"
# [Optional] When specified, the objects will be copied or moved only if they were modified
# after last_modified_time. If tzinfo has not been set, UTC will be assumed.
last_modified_time: "2021-08-10 11:58:27"
- operator: "BigQueryExecuteQueryOperator"
# Initializes a BigQuery operator that executes SQL queries against a specific BigQuery table,
# and can optionally store the results of a query in another table
# Task description
description: "Task to run a execute a BigQuery SQL query in a specific BigQuery database"
args:
# Arguments supported by this operator:
# https://airflow.apache.org/docs/apache-airflow-providers-google/stable/_api/airflow/providers/google/cloud/operators/bigquery/index.html#airflow.providers.google.cloud.operators.bigquery.BigQueryExecuteQueryOperator
task_id: "sample_bq_sql_task"
# The SQL query to execute, along with query parameters. For more info,
# see https://cloud.google.com/bigquery/docs/parameterized-queries.
sql: "SELECT * FROM DATASET_FOLDER_NAME.PIPELINE_FOLDER_NAME LIMIT @max_rows"
query_params:
- name: "max_rows"
parameterType:
type: "INTEGER"
parameterValue:
value: 100
# If specified, will store the results of the query in a BigQuery destination table
destination_dataset_table: "destination_dataset.destination_table"
# Specifies whether the job is allowed to create new tables.
create_disposition: "CREATE_IF_NEEDED"
# How to write to the destination: overwrite, append, or write if empty
# See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/WriteDisposition
write_disposition: "WRITE_TRUNCATE"
# [Optional] Configure time-based partitioning fields, i.e. partition by field, type, and expiration,
# as per the API specifications.
time_partitioning:
# The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour,
# month, and year, respectively.
type: "DAY"
# [Optional] BigQuery supports clustering for both partitioned and non-partitioned tables. The order
# of columns given determines the sort order.
cluster_fields:
- "column_1"
- "column_2"
- "column_3"
# [Optional for US and EU] The geographic location of the job
location: "asia-northeast1"
# Deletes objects from a Google Cloud Storage bucket, either from an explicit list of object names
# or all objects matching a prefix.
- operator: "GoogleCloudStorageDeleteOperator"
# Task description
description: "Task to delete objects from a GCS bucket"
# Arguments supported by this operator:
# https://airflow.apache.org/docs/apache-airflow-providers-google/stable/_api/airflow/providers/google/cloud/operators/gcs/index.html#airflow.providers.google.cloud.operators.gcs.GCSDeleteObjectsOperator
args:
task_id: "gcs_delete_task"
# The GCS bucket where the objects to delete are located.
bucket_name: "sample_bucket"
# List of objects to delete. These should be the names of objects in the bucket, not including gs://bucket/.
objects: ["path/to/some_object"]
# Alternatively, you can specify a prefix of objects to delete.
# All objects matching this prefix in the bucket will be deleted.
prefix: "prefix/to/delete"
# Executes a task in a Kubernetes pod that uses the Cloud Composer environment's own CPU and memory resources.
# Note: Do NOT use this for very heavy workloads. This can potentially starve Cloud Composer of resources
# and affect overall data pipeline orchestration. Instead, run such workloads on a separate GKE cluster using the Kubernetes
# Engine operators: https://github.com/apache/airflow/blob/master/airflow/providers/google/cloud/operators/kubernetes_engine.py
- operator: "KubernetesPodOperator"
# Task description
description: "Task to run a KubernetesPodOperator"
args:
# Arguments supported by this operator:
# https://airflow.readthedocs.io/en/1.10.15/_api/airflow/contrib/operators/kubernetes_pod_operator/index.html#airflow.contrib.operators.kubernetes_pod_operator.KubernetesPodOperator
task_id: "sample_kube_pod_operator"
# The name of the pod in which the task will run. This will be used (plus a random suffix) to generate a pod id
name: "sample-kube-operator"
# The Kubernetes namespace to run the pod in. For Composer versions >= 2.0.0, GKE pods do not have
# access to resources in your Google Cloud project when using the "default" namespace. You need to
# create a new namespace and Kubernetes service account with appropriate bindings. The new namespace
# and SA should be used by your pods to gain access to GCP resources in your project. For info on how
# to configure this, see https://cloud.google.com/composer/docs/composer-2/use-kubernetes-pod-operator.
namespace: "KUBE_NAMESPACE"
# The Kubernetes service account under the namespace specified above.
service_account_name: "KUBE_SERVICE_ACCOUNT"
# The Google Container Registry image URL. To prepare a Docker image to be used by this operator:
#
# 1. Create an `_images` folder under your dataset folder if it doesn't exist.
#
# 2. Inside the `_images` folder, create another folder and name it after what the image is expected to do, e.g. process_shapefiles, get_cdf_metadata.
#
# 3. In that subfolder, create a Dockerfile (https://docs.docker.com/engine/reference/builder/) and any scripts you need to process the data. Use the `COPY` command (https://docs.docker.com/engine/reference/builder/#copy) in your `Dockerfile` to include your scripts in the image.
#
# The resulting file tree for a dataset that uses two container images may look like
#
# datasets
# └── DATASET
#     ├── _images
#     │   ├── container_a
#     │   │   ├── Dockerfile
#     │   │   ├── requirements.txt
#     │   │   └── script.py
#     │   └── container_b
#     │       ├── Dockerfile
#     │       ├── requirements.txt
#     │       └── script.py
#     ├── _terraform/
#     ├── PIPELINE_A
#     ├── PIPELINE_B
#     ├── ...
#     └── dataset.yaml
#
# Docker images will be built and pushed to GCR by default whenever the `scripts/generate_dag.py`
# script is run. To skip building and pushing images, use the optional `--skip-builds` flag.
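# For reference only, a minimal Dockerfile sketch for one of the `_images`
# subfolders described above (the file names are the examples used in the tree):
#
#   FROM python:3.8-slim
#   WORKDIR /app
#   COPY requirements.txt .
#   RUN pip install --no-cache-dir -r requirements.txt
#   COPY script.py .
#   CMD ["python", "script.py"]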
image: "{{ var.json.DATASET_FOLDER_NAME.container_registry.IMAGE_REPOSITORY }}"
# Always pull the latest image. We recommend keeping this set to "Always".
image_pull_policy: "Always"
# Set the environment variables you need initialized in the container. Use these as input
# variables for the script your container is expected to run.
env_vars:
TEST_ENV_VAR: "test-value"
ANOTHER_ENV_VAR: 12345
# Set resource limits for the pod here. For resource units in Kubernetes, see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes
resources:
limit_memory: "250M"
limit_cpu: "1"
# Documentation:
# http://airflow.apache.org/docs/apache-airflow-providers-google/stable/_api/airflow/providers/google/cloud/operators/kubernetes_engine/index.html#airflow.providers.google.cloud.operators.kubernetes_engine.GKEStartPodOperator
- operator: "GKEStartPodOperator"
args:
task_id: "gke_start_pod_task"
project_id: "{{ var.value.gcp_project_id }}"
location: "{{ var.value.gcp_location }}"
# The name of the Google Kubernetes Engine cluster the pod should be spawned in
cluster_name: "GKE_CLUSTER_NAME"
# The namespace to run within Kubernetes
namespace: "default"
# The name of the pod in which the task will run. This will be used (plus a random suffix) to generate a pod id
name: "sample-gke-pod"
# The GCR image URL. It's recommended to keep the actual value out of this file and set it using Airflow variables
image: "{{ var.json.DATASET_FOLDER_NAME.container_registry.IMAGE_REPOSITORY }}"
# [Optional] Entrypoint command for the container
cmds:
- "sh"
- "-c"
- "mkdir -p /airflow/xcom/ && echo '[1,2,3,4]' > /airflow/xcom/return.json"
# [Optional] Enable the usage of XCom on the operator. XCom allows you to store values used downstream
# in your DAG. To do this, create a `return.json` file under /airflow/xcom in your pod script. To use
# XCom variables in other operators, use the macro {{ task_instance.xcom_pull('POD_TASK_ID')[0] }}
do_xcom_push: True
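# For illustration, a hypothetical downstream task (not part of this sample)
# could consume the pushed value using the macro described above:
#
#   - operator: "BashOperator"
#     args:
#       task_id: "consume_xcom_task"
#       bash_command: "echo {{ task_instance.xcom_pull('gke_start_pod_task')[0] }}"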
# Deletes a GKE cluster, including the Kubernetes endpoint and all worker nodes.
- operator: "GKEDeleteClusterOperator"
args:
task_id: "gke_delete_cluster_task"
project_id: "{{ var.value.gcp_project_id }}"
location: "{{ var.value.gcp_location }}"
# The GKE cluster name
name: "sample-gke-cluster"
# Optional service account to impersonate using short-term credentials
impersonation_chain: "{{ var.json.DATASET_FOLDER_NAME.PIPELINE_FOLDER_NAME.service_account }}"
# Creates a GKE cluster of the specified dimensions. The operator will wait until the cluster is created.
- operator: "GKECreateClusterOperator"
args:
task_id: "gke_create_cluster_task"
project_id: "{{ var.value.gcp_project_id }}"
location: "{{ var.value.gcp_location }}"
# The cluster definition to create. For the available fields, see
# https://googleapis.dev/python/container/latest/container_v1/types.html#google.cloud.container_v1.types.Cluster
body:
name: "sample-gke-cluster"
initial_node_count: 1
node_config:
# For the list of machine types, see https://cloud.google.com/compute/docs/general-purpose-machines#e2-standard.
# Some of the machine types and their specs are listed below for convenience:
#
# Machine type    vCPUs              Memory (GB)
# e2-micro        2 (shared core)    1
# e2-small        2 (shared core)    2
# e2-medium       2 (shared core)    4
# e2-standard-2   2                  8
# e2-standard-4   4                  16
# e2-standard-8   8                  32
machine_type: e2-standard-2
# https://googleapis.dev/python/container/latest/container_v1/types.html#google.cloud.container_v1.types.NodeConfig.oauth_scopes
oauth_scopes:
- https://www.googleapis.com/auth/devstorage.read_write
# Optional service account to impersonate using short-term credentials
impersonation_chain: "{{ var.json.DATASET_FOLDER_NAME.PIPELINE_FOLDER_NAME.service_account }}"
# Launch Cloud Dataflow jobs written in Python
# https://airflow.apache.org/docs/apache-airflow-providers-google/stable/_api/airflow/providers/google/cloud/operators/dataflow/index.html#airflow.providers.google.cloud.operators.dataflow.DataflowCreatePythonJobOperator
#
# You must organize your files as follows:
#
# datasets/DATASET_NAME/PIPELINE_NAME/custom/
#   DATAFLOW_JOB_NAME/
#     main.py       (contains the Beam Pipeline definition)
#     setup.py      (this file is required)
#     my_package/   (optional folder containing custom dependencies)
#       __init__.py (this file is required inside every package subfolder)
#       my_file.py
#       ...
#     ...
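# For reference only, a minimal `main.py` sketch that fits the layout above
# (the transforms are placeholders; your actual pipeline logic will differ):
#
#   import apache_beam as beam
#   from apache_beam.options.pipeline_options import PipelineOptions
#
#   def run():
#       options = PipelineOptions()
#       with beam.Pipeline(options=options) as pipeline:
#           (
#               pipeline
#               | "create" >> beam.Create([1, 2, 3])
#               | "double" >> beam.Map(lambda x: x * 2)
#           )
#
#   if __name__ == "__main__":
#       run()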
- operator: "DataflowCreatePythonJobOperator"
description: "Task to run a Dataflow job"
args:
task_id: "sample_dataflow_task"
# Path to the Python script containing the Apache Beam Pipeline definition.
# Place the script in your pipeline's `custom` folder, preferably namespaced
# under a folder named after the job name (templated as `DATAFLOW_JOB_NAME`).
py_file: "{{ var.value.airflow_dags_folder }}/DATASET_NAME/PIPELINE_NAME/custom/DATAFLOW_JOB_NAME/main.py"
# Python interpreter version for the Beam pipeline. If not set, this defaults to python3.
py_interpreter: "python3"
# List of Python package(s) required by your Dataflow job.
# The three packages listed below are often needed.
py_requirements:
- "apache-beam"
- "google-apitools"
- "google-cloud-storage"
dataflow_default_options:
# Your Google Cloud project ID
project: "{{ var.value.gcp_project }}"
# [Required] Path for temporary files.
# You can use the Composer bucket (preferred) or your own bucket.
# When using the Cloud Composer bucket, use the templated path below to prevent collisions.
temp_location: "{{ var.value.composer_bucket }}/data/DATASET_NAME/PIPELINE_NAME/dataflow/tmp/DATAFLOW_JOB_NAME/"
# The GCS bucket path to store staging binary files used by Apache Beam
# You can use the Composer bucket (preferred) or your own bucket.
# When using the Cloud Composer bucket, use the templated path below to prevent collisions.
staging_location: "{{ var.value.composer_bucket }}/data/DATASET_NAME/PIPELINE_NAME/dataflow/staging/"
# [Optional] If your pipeline uses public packages from PyPI, make
# these packages available via a requirements.txt file. Create the file
# in the `custom` folder, in the same subfolder as your `main.py`.
requirements_file: "{{ var.value.airflow_dags_folder }}/DATASET_NAME/PIPELINE_NAME/custom/DATAFLOW_JOB_NAME/requirements.txt"
# [Required] The pipeline runner to use. Must be set to "DataflowRunner" to run on
# Google Cloud Dataflow. Keep this unchanged.
runner: "DataflowRunner"
# [Optional] Use the parameter below if you have custom Python dependencies for the job.
# For instance, if you have a main.py file that contains your Apache Beam Pipeline definition
# along with references to other packages/files you've written (i.e. non-PyPI):
#
# import apache_beam as beam
# from my_package import my_file
#
# # Define data transforms
# my_class = my_file.MyClass(...)
# my_file.my_function(...)
# ...
#
# # Define pipeline
# with beam.Pipeline(options=options) as pipeline:
#     (
#         pipeline
#         | "stage_1" >> beam.Create(collection)
#         | "stage_2" >> beam.Map(...)
#         | "stage_3" >> beam.MapTuple(...)
#         | "stage_4" >> beam.FlatMap(...)
#         ...
#     )
# ...
#
# you must organize your files as follows:
#
# datasets/DATASET_NAME/PIPELINE_NAME/custom/
#   DATAFLOW_JOB_NAME/
#     main.py
#     setup.py          (this file is required when using custom packages)
#     requirements.txt
#     my_package/       (folder containing a custom package)
#       __init__.py     (this file is required inside every package subfolder)
#       my_file.py
#       ...
#     ...
#
# where every subfolder is a package. You must also include a setup.py in the same directory as main.py.
# Your setup.py must contain
#
# import setuptools
#
# setuptools.setup(
#     name='DATAFLOW_JOB_NAME',
#     version='1.0.0',
#     install_requires=[],
#     packages=setuptools.find_packages(),
# )
#
# (For more info, see https://beam.apache.org/documentation/sdks/python-pipeline-dependencies/#multiple-file-dependencies)
#
# Now, use this parameter to declare the path to the setup.py file under /home/airflow/gcs/dags
setup_file: "/home/airflow/gcs/dags/DATASET_NAME/PIPELINE_NAME/custom/DATAFLOW_JOB_NAME/setup.py"
# [Optional] The Google Compute Engine machine type; must be n1-standard-2 or higher.
# GCE machine types: https://cloud.google.com/compute/docs/machine-types.
# The Dataflow service chooses the machine type based on your job if you do not set this option.
machine_type: "n1-standard-2"
# [Optional] The disk size, in gigabytes, to use on each remote Compute Engine worker instance.
# Set to 0 to use the default size defined in your Cloud Platform project.
disk_size_gb: 64
# [Optional] The maximum number of workers to be made available to your
# pipeline during execution. If unspecified, Dataflow determines an
# appropriate number of workers, usually set to 12 or 18.
max_num_workers: 18
# [Optional] Number of workers to use when executing the Dataflow job.
# If not set, the Dataflow service will use a reasonable default.
# Prefer using `max_num_workers` instead of this one to control resource
# limits and let autoscaling adjust the workers up to that number.
num_workers: 1
graph_paths:
# This is where you specify the relationships (i.e. directed paths/edges)
# among the tasks specified above. Use the bitshift operator to define the
# relationships and the `task_id` value above to represent tasks.
#
# For more info, see
# https://airflow.apache.org/docs/apache-airflow/stable/tutorial.html#setting-up-dependencies
- "sample_bash_task >> [sample_gcs_to_bq_task, sample_gcs_to_gcs_task, sample_data_transfer_service_gcs_to_gcs]"
- "sample_gcs_to_bq_task >> sample_dataflow_task >> [sample_bq_sql_task, gcs_delete_task]"
- "gke_create_cluster_task >> gke_start_pod_task >> gke_delete_cluster_task"