# k3s/argocd/apps/plane/values-plane.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: plane
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  ignoreDifferences:
    - group: ""
      kind: PersistentVolumeClaim
      jsonPointers:
        - /metadata/creationTimestamp
        - /status
    - group: apps
      kind: StatefulSet
      jsonPointers:
        - /spec/volumeClaimTemplates
        - /status
        - /spec/replicas
    - group: apps
      kind: Deployment
      name: plane-worker-wl
      jsonPointers:
        - /status
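  # The entries above keep Argo CD from reporting fields that Kubernetes
  # mutates at runtime (PVC status, StatefulSet volumeClaimTemplates,
  # replica counts, Deployment status) as drift; RespectIgnoreDifferences=true
  # under syncOptions below makes syncs honor them as well.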
  project: default
  source:
    repoURL: 'https://helm.plane.so/'
    chart: 'plane-ce'
    targetRevision: 1.3.3
    helm:
      values: |
        ingress:
          enabled: true
          appHost: "plane.innovation-hub-niedersachsen.de"
          ingressClass: "traefik"
          ingress_annotations:
            cert-manager.io/cluster-issuer: lets-encrypt
            traefik.ingress.kubernetes.io/router.entrypoints: websecure
        ssl:
          tls_secret_name: "plane-tls"
          createIssuer: false
          generateCerts: false
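        # TLS is expected to come from cert-manager via the cluster-issuer
        # annotation above, landing in the plane-tls secret; the chart's own
        # issuer and self-signed cert generation are disabled so the two
        # mechanisms do not conflict.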
        minio:
          local_setup: false
        env:
          docstore_bucket: "planedocstore"
          doc_upload_size_limit: "5242880"  # 5 MiB in bytes
          aws_access_key: "a0ccb47cc0994bf51ecd"
          aws_secret_access_key: "0d54ee2f943f2a56b8cafc3afe9cb1e2f9fecac2"
          aws_region: "eu-central-1"
          aws_s3_endpoint_url: "https://sws3.innovation-hub-niedersachsen.de"
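          # NOTE: committing S3 credentials in plain text to a Git-managed
          # values file is risky. A minimal sketch of the alternative,
          # assuming the keys are moved into a Kubernetes Secret (the secret
          # name is hypothetical, and how plane-ce consumes an existing
          # secret must be checked against the chart's values schema):
          #
          #   apiVersion: v1
          #   kind: Secret
          #   metadata:
          #     name: plane-s3-credentials
          #     namespace: plane
          #   type: Opaque
          #   stringData:
          #     aws_access_key: "<access-key>"
          #     aws_secret_access_key: "<secret-key>"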
          # Celery worker - aggressive memory limiting
          CELERY_WORKER_CONCURRENCY: "1"
          CELERY_WORKER_MAX_TASKS_PER_CHILD: "5"
          CELERY_WORKER_MAX_MEMORY_PER_CHILD: "400000"  # in KiB (~390 MiB)
          CELERY_WORKER_PREFETCH_MULTIPLIER: "1"
          # Task limits - very restrictive: the soft limit raises an exception
          # in the task after 120 s, the hard limit kills it at 180 s
          CELERY_TASK_SOFT_TIME_LIMIT: "120"
          CELERY_TASK_TIME_LIMIT: "180"
          CELERY_TASK_ACKS_LATE: "true"
          CELERY_TASK_REJECT_ON_WORKER_LOST: "true"
          # Task routing to isolate problematic tasks
          CELERY_TASK_DEFAULT_QUEUE: "default"
          CELERY_TASK_CREATE_MISSING_QUEUES: "true"
          # Python memory management: bypass pymalloc and lower the glibc
          # malloc thresholds (bytes) so freed memory returns to the OS sooner
          PYTHONMALLOC: "malloc"
          MALLOC_TRIM_THRESHOLD_: "65536"
          MALLOC_MMAP_THRESHOLD_: "65536"
          # Raise logging to identify problem tasks; note that %(process)s
          # prints the worker PID, not its memory usage
          CELERY_WORKER_LOG_LEVEL: "INFO"
          CELERY_TASK_LOG_FORMAT: "[%(asctime)s: %(levelname)s/%(processName)s] %(task_name)s[%(task_id)s]: %(message)s - Memory: %(process)s"
        worker:
          replicas: 2
          concurrency: 1
          resources:
            requests:
              memory: "2Gi"
              cpu: "500m"
            limits:
              memory: "6Gi"
              cpu: "1500m"
          # Health checks effectively disabled for debugging: /bin/true
          # always exits 0, so both probes always pass
          readinessProbe:
            exec:
              command: ["/bin/true"]
            initialDelaySeconds: 10
            periodSeconds: 300
          livenessProbe:
            exec:
              command: ["/bin/true"]
            initialDelaySeconds: 30
            periodSeconds: 300
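          # Once debugging is done, a sketch of a real liveness check
          # (hypothetical: the celery entrypoint and app module inside the
          # plane worker image must be verified first):
          #
          #   livenessProbe:
          #     exec:
          #       command: ["celery", "-A", "plane", "inspect", "ping"]
          #     initialDelaySeconds: 60
          #     periodSeconds: 120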
          # Graceful shutdown: give in-flight tasks up to 60 s to finish
          # before the pod is killed
          terminationGracePeriodSeconds: 60
  destination:
    server: 'https://kubernetes.default.svc'
    namespace: plane
  syncPolicy:
    managedNamespaceMetadata:
      labels:
        # Pod Security Admission: "privileged" disables PSA restrictions
        # for the plane namespace
        pod-security.kubernetes.io/enforce: "privileged"
    automated:
      selfHeal: true
      prune: true
    syncOptions:
      - CreateNamespace=true
      - ServerSideApply=true
      - PruneLast=true
      - RespectIgnoreDifferences=true
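# After a sync, the Application can be inspected with the argocd CLI, e.g.:
#   argocd app get plane
#   argocd app sync plane --dry-run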