Airflow Configuration

  ____________       _____________
 ____    |__( )_________  __/__  /________      __
____  /| |_  /__  ___/_  /_ __  /_  __ \_ | /| / /
___  ___ |  / _  /   _  __/ _  / / /_/ /_ |/ |/ /
 _/_/  |_/_/  /_/    /_/    /_/  \____/____/|__/
   v1.10.1
/usr/local/airflow/airflow.cfg
[core]
# The home folder for airflow, default is ~/airflow
airflow_home = /usr/local/airflow

# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
# This path must be absolute
dags_folder = /usr/local/airflow/dags

# The folder where airflow should store its log files
# This path must be absolute
base_log_folder = /usr/local/airflow/logs

# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elasticsearch.
# Users must supply an Airflow connection id that provides access to the storage
# location. If remote_logging is set to true, see UPDATING.md for additional
# configuration requirements.
remote_logging = False
remote_log_conn_id =
remote_base_log_folder =
encrypt_s3_logs = False
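# For example, to ship task logs to S3 (a sketch with a hypothetical bucket and
# Airflow connection id):
#     remote_logging = True
#     remote_log_conn_id = my_s3_conn
#     remote_base_log_folder = s3://my-airflow-logs/logs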

# Logging level
logging_level = INFO
fab_logging_level = WARN

# Logging class
# Specify the class that defines the logging configuration
# This class has to be on the python classpath
# logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
logging_config_class =
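# A minimal sketch of a custom logging config, assuming a hypothetical module
# my_log_settings.py importable on the PYTHONPATH. It starts from Airflow's
# shipped default dictConfig and only adjusts what it needs:
#
#     # my_log_settings.py (hypothetical)
#     from copy import deepcopy
#     from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG
#
#     LOGGING_CONFIG = deepcopy(DEFAULT_LOGGING_CONFIG)
#     # Example tweak: quieter DAG-processing logs
#     LOGGING_CONFIG["loggers"]["airflow.processor"]["level"] = "WARN"
#
# and then point this setting at it: logging_config_class = my_log_settings.LOGGING_CONFIG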

# Log format
# we need to escape the curly braces by adding an additional curly brace
log_format = [%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s
simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s

# Log filename format
# we need to escape the curly braces by adding an additional curly brace
log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log
log_processor_filename_template = {{ filename }}.log
dag_processor_manager_log_location = /usr/local/airflow/logs/dag_processor_manager/dag_processor_manager.log

# Resolve the hostname by providing a path to a callable (in module:attribute form)
hostname_callable = socket:getfqdn
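# A small sketch of a custom callable, assuming a hypothetical module
# my_hostname.py on the PYTHONPATH:
#
#     # my_hostname.py (hypothetical)
#     import socket
#
#     def get_short_hostname():
#         # Use the short hostname instead of the fully qualified one
#         return socket.gethostname()
#
# and then: hostname_callable = my_hostname:get_short_hostname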

# Default timezone in case supplied date times are naive
# can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam)
default_timezone = utc

# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor, DaskExecutor
executor = SequentialExecutor

# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engines; more information
# on their website
# sql_alchemy_conn = sqlite:////tmp/airflow.db
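# In this deployment the connection string is supplied through the environment
# instead of this file: variables named AIRFLOW__<SECTION>__<KEY>, e.g.
# AIRFLOW__CORE__SQL_ALCHEMY_CONN, override airflow.cfg values (see the
# "env var" entries in the Running Configuration table below).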

# If SqlAlchemy should pool database connections.
sql_alchemy_pool_enabled = True

# The encoding for the databases
sql_engine_encoding = utf-8

# The SqlAlchemy pool size is the maximum number of database connections
# in the pool. 0 indicates no limit.
sql_alchemy_pool_size = 5

# The SqlAlchemy pool recycle is the number of seconds a connection
# can be idle in the pool before it is invalidated. This config does
# not apply to sqlite. If the number of DB connections is ever exceeded,
# a lower config value will allow the system to recover faster.
sql_alchemy_pool_recycle = 1800

# How many seconds to retry re-establishing a DB connection after
# disconnects. Setting this to 0 disables retries.
sql_alchemy_reconnect_timeout = 300

# The schema to use for the metadata database
# SqlAlchemy supports databases with the concept of multiple schemas.
sql_alchemy_schema =

# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32

# The number of task instances allowed to run concurrently by the scheduler
dag_concurrency = 16

# Are DAGs paused by default at creation
dags_are_paused_at_creation = True

# When not using pools, tasks are run in the "default pool",
# whose size is guided by this config element
non_pooled_task_slot_count = 128

# The maximum number of active DAG runs per DAG
max_active_runs_per_dag = 16

# Whether to load the examples that ship with Airflow. It's good to
# get started, but you probably want to set this to False in a production
# environment
load_examples = True

# Where your Airflow plugins are stored
plugins_folder = /usr/local/airflow/plugins

# Secret key to save connection passwords in the db
fernet_key = $FERNET_KEY
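# A quick way to generate a new key, using the same cryptography library
# Airflow depends on (a sketch; store the result securely, e.g. in the
# FERNET_KEY environment variable referenced above):
#
#     from cryptography.fernet import Fernet
#
#     # Prints a url-safe base64-encoded 32-byte key
#     print(Fernet.generate_key().decode())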

# Whether to disable pickling dags
donot_pickle = False

# How long before timing out a python file import while filling the DagBag
dagbag_import_timeout = 30

# The class to use for running task instances in a subprocess
task_runner = BashTaskRunner

# If set, tasks without a `run_as_user` argument will be run with this user
# Can be used to de-elevate a sudo user running Airflow when executing tasks
default_impersonation =
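# The same effect can be requested per task via the `run_as_user` argument that
# every operator accepts, e.g. (a sketch with a hypothetical "etl" unix account):
#
#     from datetime import datetime
#     from airflow import DAG
#     from airflow.operators.bash_operator import BashOperator
#
#     dag = DAG("impersonation_example", start_date=datetime(2018, 1, 1),
#               schedule_interval=None)
#     whoami = BashOperator(task_id="run_as_etl_user", bash_command="whoami",
#                           run_as_user="etl",  # hypothetical service account on the worker
#                           dag=dag)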

# What security module to use (for example kerberos):
security =

# If set to False, some insecure features like Charts and Ad Hoc Queries are enabled.
# In 2.0 this will default to True.
secure_mode = False

# Turn unit test mode on (overwrites many configuration options with test
# values at runtime)
unit_test_mode = False

# Name of handler to read task instance logs.
# Default to use task handler.
task_log_reader = task

# Whether to enable pickling for xcom (note that this is insecure and allows for
# RCE exploits). This will be deprecated in Airflow 2.0 (it will be forced to False).
enable_xcom_pickling = True
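# With pickling enabled, tasks may exchange arbitrary Python objects via XCom;
# with it disabled, values must be JSON-serializable. A minimal sketch with
# hypothetical task names:
#
#     from datetime import datetime
#     from airflow import DAG
#     from airflow.operators.python_operator import PythonOperator
#
#     dag = DAG("xcom_example", start_date=datetime(2018, 1, 1), schedule_interval=None)
#
#     def produce(**context):
#         return {"rows_loaded": 42}   # the return value is pushed to XCom automatically
#
#     def consume(**context):
#         print(context["ti"].xcom_pull(task_ids="produce"))
#
#     produce_task = PythonOperator(task_id="produce", python_callable=produce,
#                                   provide_context=True, dag=dag)
#     consume_task = PythonOperator(task_id="consume", python_callable=consume,
#                                   provide_context=True, dag=dag)
#     produce_task >> consume_task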

# When a task is killed forcefully, this is the amount of time in seconds that
# it has to cleanup after it is sent a SIGTERM, before it is SIGKILLED
killed_task_cleanup_time = 60

# Whether to override params with dag_run.conf. If you pass some key-value pairs through `airflow backfill -c` or
# `airflow trigger_dag -c`, the key-value pairs will override the existing ones in params.
dag_run_conf_overrides_params = False
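# For example, with `airflow trigger_dag -c '{"date": "2018-11-01"}' my_dag`, the
# "date" key would override a same-named entry in the DAG's params when this flag
# is True. A sketch of a task reading both (hypothetical DAG/task names):
#
#     from datetime import datetime
#     from airflow import DAG
#     from airflow.operators.python_operator import PythonOperator
#
#     dag = DAG("trigger_conf_example", start_date=datetime(2018, 1, 1),
#               schedule_interval=None)
#
#     def show_conf(**context):
#         print(context["params"])        # params, overridden by conf when enabled
#         print(context["dag_run"].conf)  # the raw conf passed at trigger time
#
#     show = PythonOperator(task_id="show_conf", python_callable=show_conf,
#                           provide_context=True, dag=dag)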

# Worker initialisation check to validate Metadata Database connection
worker_precheck = False

[cli]
# In what way should the cli access the API. The LocalClient will use the
# database directly, while the json_client will use the api running on the
# webserver
api_client = airflow.api.client.local_client

# If you set web_server_url_prefix, do NOT forget to append it here, ex:
# endpoint_url = http://localhost:8080/myroot
# So api will look like: http://localhost:8080/myroot/api/experimental/...
endpoint_url = http://localhost:8080

[api]
# How to authenticate users of the API
auth_backend = airflow.api.auth.backend.default
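# With the default backend all API requests are allowed. A sketch of calling the
# experimental REST API from Python (assumes the webserver configured below and a
# hypothetical dag_id "example_dag"):
#
#     import requests
#
#     base = "http://localhost:8080/api/experimental"
#     print(requests.get(base + "/test").json())            # simple liveness check
#     requests.post(base + "/dags/example_dag/dag_runs",
#                   json={"conf": {"date": "2018-11-01"}})   # trigger a run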

[lineage]
# what lineage backend to use
backend =

[atlas]
sasl_enabled = False
host =
port = 21000
username =
password =

[operators]
# The default owner assigned to each new operator, unless
# provided explicitly or passed via `default_args`
default_owner = Airflow
default_cpus = 1
default_ram = 512
default_disk = 512
default_gpus = 0

[hive]
# Default mapreduce queue for HiveOperator tasks
default_hive_mapred_queue =
# Template for mapred_job_name in HiveOperator, supports the following named parameters:
# hostname, dag_id, task_id, execution_date
mapred_job_name_template = Airflow HiveOperator task for {hostname}.{dag_id}.{task_id}.{execution_date}

[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080

# The ip specified when starting the web server
web_server_host = 0.0.0.0

# The port on which to run the web server
web_server_port = 8080

# Paths to the SSL certificate and key for the web server. When both are
# provided SSL will be enabled. This does not change the web server port.
web_server_ssl_cert =
web_server_ssl_key =

# Number of seconds the webserver waits before killing a gunicorn master that doesn't respond
web_server_master_timeout = 120

# Number of seconds the gunicorn webserver waits before timing out on a worker
web_server_worker_timeout = 120

# Number of workers to refresh at a time. When set to 0, worker refresh is
# disabled. When nonzero, airflow periodically refreshes webserver workers by
# bringing up new ones and killing old ones.
worker_refresh_batch_size = 1

# Number of seconds to wait before refreshing a batch of workers.
worker_refresh_interval = 30

# Secret key used to run your flask app
secret_key = temporary_key

# Number of workers to run the Gunicorn web server
workers = 4

# The worker class gunicorn should use. Choices include
# sync (default), eventlet, gevent
worker_class = sync

# Log files for the gunicorn webserver. '-' means log to stderr.
access_logfile = -
error_logfile = -

# Expose the configuration file in the web server
# This is only applicable for the flask-admin based web UI (non FAB-based).
# In the FAB-based web UI with RBAC feature,
# access to configuration is controlled by role permissions.
expose_config = True

# Set to true to turn on authentication:
# https://airflow.incubator.apache.org/security.html#web-authentication
authenticate = False
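# A common setup is password authentication, which needs the `password` extra
# (pip install apache-airflow[password]) and roughly this configuration:
#
#     authenticate = True
#     auth_backend = airflow.contrib.auth.backends.password_auth
#
# Users can then be created from a Python shell (a sketch, with placeholder
# credentials):
#
#     from airflow import models, settings
#     from airflow.contrib.auth.backends.password_auth import PasswordUser
#
#     user = PasswordUser(models.User())
#     user.username = "admin"
#     user.email = "admin@example.com"
#     user.password = "change_me"
#     session = settings.Session()
#     session.add(user)
#     session.commit()
#     session.close()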

# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False

# Filtering mode. Choices include user (default) and ldapgroup.
# Ldap group filtering requires using the ldap backend
#
# Note that the ldap server needs the "memberOf" overlay to be set up
# in order to use the ldapgroup mode.
owner_mode = user

# Default DAG view.  Valid values are:
# tree, graph, duration, gantt, landing_times
dag_default_view = tree

# Default DAG orientation. Valid values are:
# LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top)
dag_orientation = LR

# Puts the webserver in demonstration mode; blurs the names of Operators for
# privacy.
demo_mode = False

# The amount of time (in secs) the webserver will wait for the initial handshake
# while fetching logs from another worker machine
log_fetch_timeout_sec = 5

# By default, the webserver shows paused DAGs. Flip this to hide paused
# DAGs by default
hide_paused_dags_by_default = False

# Consistent page size across all listing views in the UI
page_size = 100

# Use FAB-based webserver with RBAC feature
rbac = False

# Define the color of navigation bar
navbar_color = #007A87

# Default dagrun to show in UI
default_dag_run_display_number = 25

[email]
email_backend = airflow.utils.email.send_email_smtp

[smtp]
# If you want airflow to send emails on retries and failures, and you want to use
# the airflow.utils.email.send_email_smtp function, you have to configure an
# smtp server here
smtp_host = localhost
smtp_starttls = True
smtp_ssl = False
# Uncomment and set the user/pass settings if you want to use SMTP AUTH
# smtp_user = airflow
# smtp_password = airflow
smtp_port = 25
smtp_mail_from = airflow@example.com

[celery]
# This section only applies if you are using the CeleryExecutor in
# [core] section above

# The app name that will be used by celery
celery_app_name = airflow.executors.celery_executor

# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
worker_concurrency = 16

# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the worker's local log files to the airflow main
# web server, which then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused, and must be
# reachable from the main web server, which connects to the workers on it.
worker_log_server_port = 8793

# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more
# information.
broker_url = redis://redis:6379/1

# The Celery result backend, where task state and results are stored
result_backend = db+postgresql://airflow:airflow@postgres/airflow

# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it `airflow flower`. This defines the IP that Celery Flower runs on
flower_host = 0.0.0.0

# The root URL for Flower
# Ex: flower_url_prefix = /flower
flower_url_prefix =

# This defines the port that Celery Flower runs on
flower_port = 5555

# Default queue that tasks get assigned to and that workers listen on.
default_queue = default

# Import path for celery configuration options
celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG
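# A sketch of a custom Celery config, assuming a hypothetical module
# my_celery_config.py on the PYTHONPATH that extends Airflow's default:
#
#     # my_celery_config.py (hypothetical)
#     from airflow.config_templates.default_celery import DEFAULT_CELERY_CONFIG
#
#     CELERY_CONFIG = dict(DEFAULT_CELERY_CONFIG)
#     CELERY_CONFIG["task_acks_late"] = True  # example tweak
#
# and then: celery_config_options = my_celery_config.CELERY_CONFIG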

# In case of using SSL
ssl_active = False
ssl_key =
ssl_cert =
ssl_cacert =

[celery_broker_transport_options]
# This section is for specifying options which can be passed to the
# underlying celery broker transport.  See:
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options

# The visibility timeout defines the number of seconds to wait for the worker
# to acknowledge the task before the message is redelivered to another worker.
# Make sure to increase the visibility timeout to match the time of the longest
# ETA you're planning to use.
#
# visibility_timeout is only supported for Redis and SQS celery brokers.
# See:
#   http://docs.celeryproject.org/en/master/userguide/configuration.html#std:setting-broker_transport_options
#
#visibility_timeout = 21600

[dask]
# This section only applies if you are using the DaskExecutor in
# [core] section above

# The IP address and port of the Dask cluster's scheduler.
cluster_address = 127.0.0.1:8786
# TLS/ SSL settings to access a secured Dask scheduler.
tls_ca =
tls_cert =
tls_key =

[scheduler]
# Task instances listen for external kill signal (when you clear tasks
# from the CLI or the UI), this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5

# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5

# After how much time (in seconds) the scheduler should terminate;
# -1 indicates it should run continuously (see also num_runs)
run_duration = -1

# After how much time new DAGs should be picked up from the filesystem
min_file_process_interval = 0

# How often (in seconds) to scan the DAGs directory for new files. Defaults to 5 minutes.
dag_dir_list_interval = 300

# How often should stats be printed to the logs
print_stats_interval = 30

child_process_log_directory = /usr/local/airflow/logs/scheduler

# Local task jobs periodically heartbeat to the DB. If the job has not
# sent a heartbeat in this many seconds, the scheduler will mark the
# associated task instance as failed and will re-schedule the task.
scheduler_zombie_task_threshold = 300

# Turn off scheduler catchup by setting this to False.
# Default behavior is unchanged: command-line backfills still work, but the
# scheduler will not do scheduler catchup if this is False. It can also be
# set on a per-DAG basis in the DAG definition (catchup).
catchup_by_default = True
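# A DAG can opt out (or in) regardless of this default, e.g. (a sketch):
#
#     from datetime import datetime
#     from airflow import DAG
#
#     dag = DAG(
#         dag_id="no_catchup_example",   # hypothetical
#         start_date=datetime(2018, 1, 1),
#         schedule_interval="@daily",
#         catchup=False,                 # overrides catchup_by_default for this DAG
#     )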

# This changes the batch size of queries in the scheduling main loop.
# This depends on query length limits and how long you are willing to hold locks.
# 0 for no limit
max_tis_per_query = 512

# Statsd (https://github.com/etsy/statsd) integration settings
statsd_on = False
statsd_host = localhost
statsd_port = 8125
statsd_prefix = airflow

# The scheduler can run multiple threads in parallel to schedule dags.
# This defines how many threads will run.
max_threads = 2

authenticate = False

# Turn off scheduler use of cron intervals by setting this to False.
# DAGs submitted manually in the web UI or with trigger_dag will still run.
use_job_schedule = True

[ldap]
# set this to ldaps://<your.ldap.server>:<port>
uri =
user_filter = objectClass=*
user_name_attr = uid
group_member_attr = memberOf
superuser_filter =
data_profiler_filter =
bind_user = cn=Manager,dc=example,dc=com
bind_password = insecure
basedn = dc=example,dc=com
cacert = /etc/ca/ldap_ca.crt
search_scope = LEVEL

[mesos]
# Mesos master address which MesosExecutor will connect to.
master = localhost:5050

# The framework name which Airflow scheduler will register itself as on mesos
framework_name = Airflow

# Number of cpu cores required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_cpu = 1

# Memory in MB required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_memory = 256

# Enable framework checkpointing for mesos
# See http://mesos.apache.org/documentation/latest/slave-recovery/
checkpoint = False

# Failover timeout in milliseconds.
# When checkpointing is enabled and this option is set, Mesos waits until the
# configured timeout for the MesosExecutor framework to re-register after a
# failover. Mesos shuts down running tasks if the MesosExecutor framework
# fails to re-register within this timeframe.
# failover_timeout = 604800

# Enable framework authentication for mesos
# See http://mesos.apache.org/documentation/latest/configuration/
authenticate = False

# Mesos credentials, if authentication is enabled
# default_principal = admin
# default_secret = admin

# Optional Docker Image to run on slave before running the command
# This image should be accessible from the mesos slave, i.e. the mesos slave
# should be able to pull this docker image before executing the command.
# docker_image_slave = puckel/docker-airflow

[kerberos]
ccache = /tmp/airflow_krb5_ccache
# gets augmented with fqdn
principal = airflow
reinit_frequency = 3600
kinit_path = kinit
keytab = airflow.keytab

[github_enterprise]
api_rev = v3

[admin]
# UI to hide sensitive variable fields when set to True
hide_sensitive_variable_fields = True

[elasticsearch]
elasticsearch_host =
# we need to escape the curly braces by adding an additional curly brace
elasticsearch_log_id_template = {dag_id}-{task_id}-{execution_date}-{try_number}
elasticsearch_end_of_log_mark = end_of_log

[kubernetes]
# The repository, tag and imagePullPolicy of the Kubernetes Image for the Worker to Run
worker_container_repository =
worker_container_tag =
worker_container_image_pull_policy = IfNotPresent
worker_dags_folder =

# If True (default), worker pods will be deleted upon termination
delete_worker_pods = True

# The Kubernetes namespace where airflow workers should be created. Defaults to `default`
namespace = default

# The name of the Kubernetes ConfigMap Containing the Airflow Configuration (this file)
airflow_configmap =

# For either git sync or volume mounted DAGs, the worker will look in this subpath for DAGs
dags_volume_subpath =

# For DAGs mounted via a volume claim (mutually exclusive with git-sync)
dags_volume_claim =

# For volume mounted logs, the worker will look in this subpath for logs
logs_volume_subpath =

# A shared volume claim for the logs
logs_volume_claim =

# Git credentials and repository for DAGs mounted via Git (mutually exclusive with volume claim)
git_repo =
git_branch =
git_user =
git_password =
git_subpath =

# For cloning DAGs from git repositories into volumes: https://github.com/kubernetes/git-sync
git_sync_container_repository = gcr.io/google-containers/git-sync-amd64
git_sync_container_tag = v2.0.5
git_sync_init_container_name = git-sync-clone

# The name of the Kubernetes service account to be associated with airflow workers, if any.
# Service accounts are required for workers that require access to secrets or cluster resources.
# See the Kubernetes RBAC documentation for more:
#   https://kubernetes.io/docs/admin/authorization/rbac/
worker_service_account_name =

# Any image pull secrets to be given to worker pods. If more than one secret is
# required, provide a comma separated list: secret_a,secret_b
image_pull_secrets =

# GCP Service Account Keys to be provided to tasks run on Kubernetes Executors
# Should be supplied in the format: key-name-1:key-path-1,key-name-2:key-path-2
gcp_service_account_keys =

# Use the service account kubernetes gives to pods to connect to kubernetes cluster.
# It's intended for clients that expect to be running inside a pod running on kubernetes.
# It will raise an exception if called from a process not running in a kubernetes environment.
in_cluster = True

[kubernetes_node_selectors]
# The Key-value pairs to be given to worker pods.
# The worker pods will be scheduled to the nodes of the specified key-value pairs.
# Should be supplied in the format: key = value
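# For example (a sketch with a hypothetical node label):
#     disktype = ssd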

[kubernetes_secrets]
# The scheduler mounts the following secrets into your workers as they are launched by the
# scheduler. You may define as many secrets as needed and the kubernetes launcher will parse the
# defined secrets and mount them as secret environment variables in the launched workers.
# Secrets in this section are defined as follows
#     <environment_variable_mount> = <kubernetes_secret_object>:<kubernetes_secret_key>
#
# For example if you wanted to mount a kubernetes secret key named `postgres_password` from the
# kubernetes secret object `airflow-secret` as the environment variable `POSTGRES_PASSWORD` into
# your workers, you would use the following format:
#     POSTGRES_PASSWORD = airflow-secret:postgres_password
#
# Additionally you may override worker airflow settings with the AIRFLOW__<SECTION>__<KEY>
# formatting as supported by airflow normally.
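#
# For example, injecting the metadata DB connection string from a secret
# (a sketch with a hypothetical secret object and key):
#     AIRFLOW__CORE__SQL_ALCHEMY_CONN = airflow-secret:sql_alchemy_conn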


Running Configuration


Section Key Value Source
core airflow_home /usr/local/airflow airflow.cfg
core dags_folder /usr/local/airflow/dags airflow.cfg
core base_log_folder /usr/local/airflow/logs airflow.cfg
core remote_logging False airflow.cfg
core remote_log_conn_id airflow.cfg
core remote_base_log_folder airflow.cfg
core encrypt_s3_logs False airflow.cfg
core logging_level INFO airflow.cfg
core fab_logging_level WARN airflow.cfg
core logging_config_class airflow.cfg
core log_format [%(asctime)s] {{%(filename)s:%(lineno)d}} %(levelname)s - %(message)s airflow.cfg
core simple_log_format %(asctime)s %(levelname)s - %(message)s airflow.cfg
core log_filename_template {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log airflow.cfg
core log_processor_filename_template {{ filename }}.log airflow.cfg
core hostname_callable socket:getfqdn airflow.cfg
core default_timezone utc airflow.cfg
core executor LocalExecutor env var
core sql_alchemy_conn postgresql+psycopg2://airflow:airflow@postgres:5432/airflow env var
core sql_engine_encoding utf-8 airflow.cfg
core sql_alchemy_pool_enabled True airflow.cfg
core sql_alchemy_pool_size 5 airflow.cfg
core sql_alchemy_pool_recycle 1800 airflow.cfg
core sql_alchemy_reconnect_timeout 300 airflow.cfg
core parallelism 32 airflow.cfg
core dag_concurrency 16 airflow.cfg
core dags_are_paused_at_creation True airflow.cfg
core non_pooled_task_slot_count 128 airflow.cfg
core max_active_runs_per_dag 16 airflow.cfg
core load_examples False env var
core plugins_folder /usr/local/airflow/plugins airflow.cfg
core fernet_key hnuIsaXwBLx5p9PTO5hM1_9qVHeQFMhhylI79GV6jJo= env var
core donot_pickle False airflow.cfg
core dagbag_import_timeout 30 airflow.cfg
core task_runner BashTaskRunner airflow.cfg
core default_impersonation airflow.cfg
core security airflow.cfg
core secure_mode False airflow.cfg
core unit_test_mode False airflow.cfg
core task_log_reader task airflow.cfg
core enable_xcom_pickling True airflow.cfg
core killed_task_cleanup_time 60 airflow.cfg
core dag_run_conf_overrides_params False airflow.cfg
core worker_precheck False airflow.cfg
core dag_processor_manager_log_location /usr/local/airflow/logs/dag_processor_manager/dag_processor_manager.log airflow.cfg
core sql_alchemy_schema airflow.cfg
cli api_client airflow.api.client.local_client airflow.cfg
cli endpoint_url http://localhost:8080 airflow.cfg
api auth_backend airflow.api.auth.backend.default airflow.cfg
lineage backend airflow.cfg
atlas sasl_enabled False airflow.cfg
atlas host airflow.cfg
atlas port 21000 airflow.cfg
atlas username airflow.cfg
atlas password airflow.cfg
operators default_owner Airflow airflow.cfg
operators default_cpus 1 airflow.cfg
operators default_ram 512 airflow.cfg
operators default_disk 512 airflow.cfg
operators default_gpus 0 airflow.cfg
hive default_hive_mapred_queue airflow.cfg
hive mapred_job_name_template Airflow HiveOperator task for {hostname}.{dag_id}.{task_id}.{execution_date} airflow.cfg
webserver base_url http://localhost:8080 airflow.cfg
webserver web_server_host 0.0.0.0 airflow.cfg
webserver web_server_port 8080 airflow.cfg
webserver web_server_ssl_cert airflow.cfg
webserver web_server_ssl_key airflow.cfg
webserver web_server_master_timeout 120 airflow.cfg
webserver web_server_worker_timeout 120 airflow.cfg
webserver worker_refresh_batch_size 1 airflow.cfg
webserver worker_refresh_interval 30 airflow.cfg
webserver secret_key temporary_key airflow.cfg
webserver workers 4 airflow.cfg
webserver worker_class sync airflow.cfg
webserver access_logfile - airflow.cfg
webserver error_logfile - airflow.cfg
webserver expose_config True airflow.cfg
webserver authenticate False airflow.cfg
webserver filter_by_owner False airflow.cfg
webserver owner_mode user airflow.cfg
webserver dag_default_view tree airflow.cfg
webserver dag_orientation LR airflow.cfg
webserver demo_mode False airflow.cfg
webserver log_fetch_timeout_sec 5 airflow.cfg
webserver hide_paused_dags_by_default False airflow.cfg
webserver page_size 100 airflow.cfg
webserver rbac False airflow.cfg
webserver navbar_color #007A87 airflow.cfg
webserver default_dag_run_display_number 25 airflow.cfg
webserver enable_proxy_fix False default
email email_backend airflow.utils.email.send_email_smtp airflow.cfg
smtp smtp_host localhost airflow.cfg
smtp smtp_starttls True airflow.cfg
smtp smtp_ssl False airflow.cfg
smtp smtp_port 25 airflow.cfg
smtp smtp_mail_from airflow@example.com airflow.cfg
celery celery_app_name airflow.executors.celery_executor airflow.cfg
celery worker_concurrency 16 airflow.cfg
celery worker_log_server_port 8793 airflow.cfg
celery broker_url redis://redis:6379/1 airflow.cfg
celery result_backend db+postgresql://airflow:airflow@postgres:5432/airflow env var
celery flower_host 0.0.0.0 airflow.cfg
celery flower_url_prefix airflow.cfg
celery flower_port 5555 airflow.cfg
celery default_queue default airflow.cfg
celery celery_config_options airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG airflow.cfg
celery ssl_active False airflow.cfg
celery ssl_key airflow.cfg
celery ssl_cert airflow.cfg
celery ssl_cacert airflow.cfg
dask cluster_address 127.0.0.1:8786 airflow.cfg
dask tls_ca airflow.cfg
dask tls_cert airflow.cfg
dask tls_key airflow.cfg
scheduler job_heartbeat_sec 5 airflow.cfg
scheduler scheduler_heartbeat_sec 5 airflow.cfg
scheduler run_duration -1 airflow.cfg
scheduler min_file_process_interval 0 airflow.cfg
scheduler dag_dir_list_interval 300 airflow.cfg
scheduler print_stats_interval 30 airflow.cfg
scheduler child_process_log_directory /usr/local/airflow/logs/scheduler airflow.cfg
scheduler scheduler_zombie_task_threshold 300 airflow.cfg
scheduler catchup_by_default True airflow.cfg
scheduler max_tis_per_query 512 airflow.cfg
scheduler statsd_on False airflow.cfg
scheduler statsd_host localhost airflow.cfg
scheduler statsd_port 8125 airflow.cfg
scheduler statsd_prefix airflow airflow.cfg
scheduler max_threads 2 airflow.cfg
scheduler authenticate False airflow.cfg
scheduler use_job_schedule True airflow.cfg
ldap uri airflow.cfg
ldap user_filter objectClass=* airflow.cfg
ldap user_name_attr uid airflow.cfg
ldap group_member_attr memberOf airflow.cfg
ldap superuser_filter airflow.cfg
ldap data_profiler_filter airflow.cfg
ldap bind_user cn=Manager,dc=example,dc=com airflow.cfg
ldap bind_password insecure airflow.cfg
ldap basedn dc=example,dc=com airflow.cfg
ldap cacert /etc/ca/ldap_ca.crt airflow.cfg
ldap search_scope LEVEL airflow.cfg
mesos master localhost:5050 airflow.cfg
mesos framework_name Airflow airflow.cfg
mesos task_cpu 1 airflow.cfg
mesos task_memory 256 airflow.cfg
mesos checkpoint False airflow.cfg
mesos authenticate False airflow.cfg
kerberos ccache /tmp/airflow_krb5_ccache airflow.cfg
kerberos principal airflow airflow.cfg
kerberos reinit_frequency 3600 airflow.cfg
kerberos kinit_path kinit airflow.cfg
kerberos keytab airflow.keytab airflow.cfg
github_enterprise api_rev v3 airflow.cfg
admin hide_sensitive_variable_fields True airflow.cfg
elasticsearch elasticsearch_host airflow.cfg
elasticsearch elasticsearch_log_id_template {dag_id}-{task_id}-{execution_date}-{try_number} airflow.cfg
elasticsearch elasticsearch_end_of_log_mark end_of_log airflow.cfg
kubernetes worker_container_repository airflow.cfg
kubernetes worker_container_tag airflow.cfg
kubernetes delete_worker_pods True airflow.cfg
kubernetes namespace default airflow.cfg
kubernetes airflow_configmap airflow.cfg
kubernetes dags_volume_subpath airflow.cfg
kubernetes dags_volume_claim airflow.cfg
kubernetes logs_volume_subpath airflow.cfg
kubernetes logs_volume_claim airflow.cfg
kubernetes git_repo airflow.cfg
kubernetes git_branch airflow.cfg
kubernetes git_user airflow.cfg
kubernetes git_password airflow.cfg
kubernetes git_subpath airflow.cfg
kubernetes git_sync_container_repository gcr.io/google-containers/git-sync-amd64 airflow.cfg
kubernetes git_sync_container_tag v2.0.5 airflow.cfg
kubernetes git_sync_init_container_name git-sync-clone airflow.cfg
kubernetes worker_service_account_name airflow.cfg
kubernetes image_pull_secrets airflow.cfg
kubernetes gcp_service_account_keys airflow.cfg
kubernetes in_cluster True airflow.cfg
kubernetes worker_container_image_pull_policy IfNotPresent airflow.cfg
kubernetes worker_dags_folder airflow.cfg