plane switch with seaweedfs

This commit is contained in:
titver968
2025-10-21 16:54:02 +02:00
parent 459c77906c
commit edbdd56272
4 changed files with 163 additions and 352 deletions

View File

@@ -1,66 +1,109 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: seaweedfs
name: plane
finalizers:
- resources-finalizer.argocd.argoproj.io
- resources-finalizer.argocd.argoproj.io
spec:
# Health Check für Worker überspringen (temporär)
ignoreDifferences:
# PVCs - creationTimestamp und Status ignorieren
- group: ""
kind: PersistentVolumeClaim
jsonPointers:
- /metadata/creationTimestamp
- /status
# StatefulSets - alle volumeClaimTemplates komplett ignorieren
- group: apps
kind: StatefulSet
jsonPointers:
- /spec/volumeClaimTemplates
- /status
- /spec/replicas
# Worker Deployment - Replica Status ignorieren
- group: apps
kind: Deployment
name: plane-worker-wl
jsonPointers:
- /status
project: default
source:
repoURL: 'https://seaweedfs.github.io/seaweedfs/helm'
chart: seaweedfs
targetRevision: 4.0.393
repoURL: 'https://helm.plane.so/'
chart: 'plane-ce'
targetRevision: 1.3.1
helm:
values: |
master:
enabled: true
replicas: 1
volume:
enabled: true
replicas: 1
filer:
enabled: true
replicas: 1
s3:
enabled: true
replicas: 1
port: 8333
httpsPort: 8443
enableAuth: true
existingConfigSecret: "admin-s3-secret"
worker:
memoryLimit: 16Gi
cpuLimit: 5000m
cpuRequest: 1000m
memoryRequest: 8Gi
ingress:
enabled: true
className: "traefik"
host: "sws3.innovation-hub-niedersachsen.de"
# additional ingress annotations for the s3 endpoint
annotations:
kubernetes.io/ingress.class: "traefik"
traefik.ingress.kubernetes.io/router.entrypoints: "websecure"
traefik.ingress.kubernetes.io/router.tls: "true"
cert-manager.io/cluster-issuer: "lets-encrypt"
hosts:
- host: "sws3.innovation-hub-niedersachsen.de"
paths:
- path: /
pathType: Prefix
tls:
- secretName: "sws3.innovation-hub-niedersachsen.de-tls"
hosts:
- "sws3.innovation-hub-niedersachsen.de"
appHost: "plane.innovation-hub-niedersachsen.de"
ingressClass: "traefik"
ingress_annotations:
cert-manager.io/cluster-issuer: lets-encrypt
traefik.ingress.kubernetes.io/router.entrypoints: websecure
ssl:
tls_secret_name: "plane-tls"
createIssuer: false
generateCerts: false
minio:
local_setup: false
env:
docstore_bucket: "plane-docstore"
doc_upload_size_limit: "5242880"
# NOTE(review): plaintext S3 credentials committed to VCS — move these to a
# Kubernetes Secret (e.g. via external_secrets.doc_store_existingSecret) and rotate the keys
aws_access_key: "a0ccb47cc0994bf51ecd"
aws_secret_access_key: "0d54ee2f943f2a56b8cafc3afe9cb1e2f9fecac2"
aws_region: "eu-central-1"
aws_s3_endpoint_url: "https://sws3.innovation-hub-niedersachsen.de"
# Celery Worker Konfiguration - Reduziere Concurrency für Stabilität
CELERY_WORKER_CONCURRENCY: "4"
CELERY_WORKER_MAX_TASKS_PER_CHILD: "500"
CELERY_WORKER_MAX_MEMORY_PER_CHILD: "100000"
# Worker-spezifische Einstellungen
worker:
# Reduziere Replicas falls zu viele Workers laufen
replicas: 1
# Celery Concurrency (Anzahl paralleler Worker-Prozesse)
concurrency: 4
resources:
requests:
memory: "8Gi"
cpu: "600m"
limits:
memory: "16Gi"
cpu: "1000m"
# Exec-basierte Probes für Celery Worker
readinessProbe:
exec:
command:
- /bin/sh
- -c
- celery -A plane inspect ping -d celery@$HOSTNAME
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
livenessProbe:
exec:
command:
- /bin/sh
- -c
- celery -A plane inspect ping -d celery@$HOSTNAME
initialDelaySeconds: 60
periodSeconds: 60
timeoutSeconds: 10
failureThreshold: 3
destination:
server: 'https://kubernetes.default.svc'
namespace: seaweedfs
namespace: plane
syncPolicy:
managedNamespaceMetadata:
labels:
@@ -70,3 +113,6 @@ spec:
prune: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true
- PruneLast=true
- RespectIgnoreDifferences=true

View File

@@ -1,183 +0,0 @@
planeVersion: stable
dockerRegistry:
enabled: false
host: "index.docker.io/v1/"
loginid: ""
password: ""
ingress:
enabled: true
appHost: "plane.example.com"
minioHost: ""
rabbitmqHost: ""
ingressClass: "nginx"
ingress_annotations: {"nginx.ingress.kubernetes.io/proxy-body-size": "5m"}
# SSL Configuration - Valid only if ingress.enabled is true
ssl:
tls_secret_name: "" # If you have a custom TLS secret name
# If you want to use Let's Encrypt, set createIssuer and generateCerts to true
createIssuer: false
issuer: "http" # Allowed : cloudflare, digitalocean, http
token: "" # not required for http
server: https://acme-v02.api.letsencrypt.org/directory
email: plane@example.com
generateCerts: false
redis:
local_setup: true
image: valkey/valkey:7.2.5-alpine
servicePort: 6379
storageClass: ""
volumeSize: 100Mi
pullPolicy: IfNotPresent
assign_cluster_ip: false
postgres:
local_setup: true
image: postgres:15.7-alpine
servicePort: 5432
storageClass: ""
volumeSize: 1Gi
pullPolicy: IfNotPresent
assign_cluster_ip: false
rabbitmq:
local_setup: true
image: rabbitmq:3.13.6-management-alpine
pullPolicy: IfNotPresent
servicePort: 5672
managementPort: 15672
storageClass: ""
volumeSize: 100Mi
default_user: plane
default_password: plane
external_rabbitmq_url: ''
assign_cluster_ip: false
minio:
image: minio/minio:latest
image_mc: minio/mc:latest
local_setup: true
pullPolicy: IfNotPresent
root_password: password
root_user: admin
storageClass: ""
volumeSize: 1Gi
assign_cluster_ip: false
env:
minio_endpoint_ssl: false
web:
replicas: 1
memoryLimit: 1000Mi
cpuLimit: 500m
cpuRequest: 50m
memoryRequest: 50Mi
image: artifacts.plane.so/makeplane/plane-frontend
pullPolicy: Always
assign_cluster_ip: false
space:
replicas: 1
memoryLimit: 1000Mi
cpuLimit: 500m
cpuRequest: 50m
memoryRequest: 50Mi
image: artifacts.plane.so/makeplane/plane-space
pullPolicy: Always
assign_cluster_ip: false
admin:
replicas: 1
memoryLimit: 1000Mi
cpuLimit: 500m
cpuRequest: 50m
memoryRequest: 50Mi
image: artifacts.plane.so/makeplane/plane-admin
pullPolicy: Always
assign_cluster_ip: false
live:
replicas: 1
memoryLimit: 1000Mi
cpuLimit: 500m
cpuRequest: 50m
memoryRequest: 50Mi
image: artifacts.plane.so/makeplane/plane-live
pullPolicy: Always
assign_cluster_ip: false
api:
replicas: 1
memoryLimit: 1000Mi
cpuLimit: 500m
cpuRequest: 50m
memoryRequest: 50Mi
image: artifacts.plane.so/makeplane/plane-backend
pullPolicy: Always
assign_cluster_ip: false
worker:
replicas: 1
memoryLimit: 1000Mi
cpuLimit: 500m
cpuRequest: 50m
memoryRequest: 50Mi
image: artifacts.plane.so/makeplane/plane-backend
pullPolicy: Always
beatworker:
replicas: 1
memoryLimit: 1000Mi
cpuLimit: 500m
cpuRequest: 50m
memoryRequest: 50Mi
image: artifacts.plane.so/makeplane/plane-backend
pullPolicy: Always
external_secrets:
# Name of the existing Kubernetes Secret resource; see README for more details
rabbitmq_existingSecret: ''
pgdb_existingSecret: ''
doc_store_existingSecret: ''
app_env_existingSecret: ''
live_env_existingSecret: ''
env:
# NEXT_PUBLIC_DEPLOY_URL: ""
# REDIS
remote_redis_url: "" #INCASE OF REMOTE REDIS ONLY
# POSTGRES DB VALUES
pgdb_username: plane
pgdb_password: plane
pgdb_name: plane
pgdb_remote_url: "" #INCASE OF REMOTE PG DB URL ONLY
# DATA STORE
docstore_bucket: "uploads"
doc_upload_size_limit: "5242880" # 5MB
# REQUIRED IF MINIO LOCAL SETUP IS FALSE
aws_access_key: ""
aws_secret_access_key: ""
aws_region: ""
aws_s3_endpoint_url: ""
secret_key: "60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5"
sentry_dsn: ""
sentry_environment: ""
cors_allowed_origins: ""
default_cluster_domain: cluster.local
live_sentry_dsn: ""
live_sentry_environment: ""
live_sentry_traces_sample_rate: ""
api_key_rate_limit: "60/minute"

View File

@@ -1,118 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: plane
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
# Health Check für Worker überspringen (temporär)
ignoreDifferences:
# PVCs - creationTimestamp und Status ignorieren
- group: ""
kind: PersistentVolumeClaim
jsonPointers:
- /metadata/creationTimestamp
- /status
# StatefulSets - alle volumeClaimTemplates komplett ignorieren
- group: apps
kind: StatefulSet
jsonPointers:
- /spec/volumeClaimTemplates
- /status
- /spec/replicas
# Worker Deployment - Replica Status ignorieren
- group: apps
kind: Deployment
name: plane-worker-wl
jsonPointers:
- /status
project: default
source:
repoURL: 'https://helm.plane.so/'
chart: 'plane-ce'
targetRevision: 1.3.1
helm:
values: |
ingress:
enabled: true
appHost: "plane.innovation-hub-niedersachsen.de"
ingressClass: "traefik"
ingress_annotations:
cert-manager.io/cluster-issuer: lets-encrypt-staging
traefik.ingress.kubernetes.io/router.entrypoints: websecure
ssl:
tls_secret_name: "plane-tls"
createIssuer: false
generateCerts: false
minio:
local_setup: false
env:
docstore_bucket: "plane-docstore"
doc_upload_size_limit: "5242880"
aws_access_key: "a0ccb47cc0994bf51ecd"
aws_secret_access_key: "0d54ee2f943f2a56b8cafc3afe9cb1e2f9fecac2"
aws_region: "eu-central-1"
aws_s3_endpoint_url: "https://sws3.innovation-hub-niedersachsen.de"
# Celery Worker Konfiguration - Reduziere Concurrency für Stabilität
CELERY_WORKER_CONCURRENCY: "4"
CELERY_WORKER_MAX_TASKS_PER_CHILD: "500"
CELERY_WORKER_MAX_MEMORY_PER_CHILD: "100000"
# Worker-spezifische Einstellungen
worker:
# Reduziere Replicas falls zu viele Workers laufen
replicas: 1
# Celery Concurrency (Anzahl paralleler Worker-Prozesse)
concurrency: 4
resources:
requests:
memory: "512Mi"
cpu: "200m"
limits:
memory: "2Gi"
cpu: "1000m"
# Exec-basierte Probes für Celery Worker
readinessProbe:
exec:
command:
- /bin/sh
- -c
- celery -A plane inspect ping -d celery@$HOSTNAME
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
livenessProbe:
exec:
command:
- /bin/sh
- -c
- celery -A plane inspect ping -d celery@$HOSTNAME
initialDelaySeconds: 60
periodSeconds: 60
timeoutSeconds: 10
failureThreshold: 3
destination:
server: 'https://kubernetes.default.svc'
namespace: plane
syncPolicy:
managedNamespaceMetadata:
labels:
pod-security.kubernetes.io/enforce: "privileged"
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true
- PruneLast=true
- RespectIgnoreDifferences=true

View File

@@ -0,0 +1,66 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: seaweedfs
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: 'https://seaweedfs.github.io/seaweedfs/helm'
chart: seaweedfs
targetRevision: 4.0.393
helm:
values: |
master:
enabled: true
replicas: 1
volume:
enabled: true
replicas: 1
filer:
enabled: true
replicas: 1
s3:
enabled: true
replicas: 1
port: 8333
httpsPort: 8443
enableAuth: true
existingConfigSecret: "admin-s3-secret"
ingress:
enabled: true
className: "traefik"
host: "sws3.innovation-hub-niedersachsen.de"
# additional ingress annotations for the s3 endpoint
annotations:
kubernetes.io/ingress.class: "traefik"
traefik.ingress.kubernetes.io/router.entrypoints: "websecure"
traefik.ingress.kubernetes.io/router.tls: "true"
cert-manager.io/cluster-issuer: "lets-encrypt"
hosts:
- host: "sws3.innovation-hub-niedersachsen.de"
paths:
- path: /
pathType: Prefix
tls:
- secretName: "sws3.innovation-hub-niedersachsen.de-tls"
hosts:
- "sws3.innovation-hub-niedersachsen.de"
destination:
server: 'https://kubernetes.default.svc'
namespace: seaweedfs
syncPolicy:
managedNamespaceMetadata:
labels:
pod-security.kubernetes.io/enforce: "privileged"
automated:
selfHeal: true
prune: true
syncOptions:
- CreateNamespace=true