I gave up on airflow

This commit is contained in:
2025-09-13 10:23:14 -04:00
parent abd4ee798b
commit 469dfcd094
5 changed files with 1 additions and 153 deletions
-27
View File
@@ -1,27 +0,0 @@
---
# Argo CD Application: deploys the Apache Airflow Helm chart into the `ai`
# namespace, pulling values from a second (ref) source in the ollama-plus repo.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: airflow
  namespace: ai
  annotations:
    # Sync first, before higher-wave apps in the ai-stack project.
    argocd.argoproj.io/sync-wave: "0"
spec:
  project: ai-stack
  destination:
    server: https://kubernetes.default.svc
    namespace: ai
  sources:
    # Chart source: official Airflow Helm repo; "*" tracks the latest release.
    - repoURL: https://airflow.apache.org
      chart: airflow
      targetRevision: "*"
      helm:
        valueFiles:
          - $values/values/airflow.yaml
    # Values source: referenced as $values above (branch `argo`).
    - repoURL: https://git.ion606.com/ion606/ollama-plus
      targetRevision: argo
      ref: values
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
-90
View File
@@ -1,90 +0,0 @@
---
# Helm values for the Apache Airflow chart: external Postgres metastore,
# PgBouncer connection pooling, KubernetesExecutor, NodePort webserver.
useStandardNaming: true
executor: KubernetesExecutor

airflow:
  extraPipPackages: []

# metastore (postgres)
# NOTE(review): credentials are committed in plaintext here (also in
# data.metadataConnection below) — move to a Kubernetes Secret / external
# secret store and reference it instead.
env:
  - name: AIRFLOW__DATABASE__SQL_ALCHEMY_CONN
    value: "postgresql+psycopg2://postgres:mypassword@postgresql-primary.ai.svc.cluster.local:5432/airflow"

# using external postgres and not the subchart
postgresql:
  enabled: false

data:
  metadataConnection:
    user: postgres
    pass: mypassword
    protocol: postgresql
    host: postgresql-primary.ai.svc.cluster.local
    port: 5432
    db: airflow  # better to keep airflow separate from other apps
    sslmode: disable

# enable pgbouncer and sync with extraIni
pgbouncer:
  enabled: true
  # official default is scram-sha-256, but bitnami uses md5 so...
  auth_type: md5
  maxClientConn: 200
  metadataPoolSize: 20  # server connections to metadata db
  resultBackendPoolSize: 5
  # general pgbouncer section stuffs
  extraIni: |
    pool_mode = transaction
    listen_port = 6543
    listen_addr = *
    ignore_startup_parameters = extra_float_digits
    server_tls_sslmode = prefer
  metricsExporterSidecar:
    enabled: true

logs:
  persistence:
    enabled: true
    size: 2Gi

allowPodLaunching: true

scheduler:
  resources:
    requests:
      cpu: "200m"
      memory: "512Mi"
    limits:
      cpu: "1"
      memory: "1Gi"

webserver:
  # assumes this key lives under `webserver:` as written in the original;
  # some chart versions expect it at top level — TODO confirm against the
  # pinned chart's values schema.
  webserverSecretKeySecretName: airflow-webserver-secret
  service:
    type: NodePort
    nodePort: 30082  # valid NodePort range: 30000-32767
  resources:
    requests:
      cpu: "100m"
      memory: "256Mi"
    limits:
      cpu: "500m"
      memory: "512Mi"

triggerer:
  resources:
    requests:
      cpu: "50m"
      memory: "128Mi"
    limits:
      cpu: "200m"
      memory: "256Mi"

# bc using nodeport
ingress:
  enabled: false

# naur helm hooks for these jobs
createUserJob:
  useHelmHooks: false
migrateDatabaseJob:
  useHelmHooks: false