add kuma #10

Open
opened 2025-12-08 12:51:26 +00:00 by Ghostinvisible-forgejo-org · 3 comments

Flux

# Deployment: single uptime-kuma instance, pinned to one node because its
# data lives on a node-local hostPath (see volumes/nodeSelector below).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: uptime-kuma
spec:
  selector:
    matchLabels:
      app: uptime-kuma
  # must stay at 1: the hostPath-backed SQLite data dir cannot take
  # concurrent writers
  replicas: 1
  template:
    metadata:
      labels:
        app: uptime-kuma
    spec:
      # the pod needs no Kubernetes API access, so don't mount the token
      automountServiceAccountToken: false
      containers:
        - name: main
          # intentionally untagged here: the Kustomization images transformer
          # rewrites this to the pinned ghcr.io/visualon/uptime-kuma image
          image: louislam/uptime-kuma
          ports:
            - containerPort: 3001
              name: http
          resources:
            requests:
              memory: 32Mi
              cpu: 50m
            limits:
              memory: 1Gi
              cpu: 500m

          # generous startup window (5s delay + up to 15 probes x 5s timeout)
          # before the container is considered failed
          startupProbe:
            httpGet:
              path: /manifest.json
              port: http
            initialDelaySeconds: 5
            timeoutSeconds: 5
            failureThreshold: 15

          # liveness probe intentionally disabled — see upstream issue:
          # https://github.com/louislam/uptime-kuma/issues/4500
          # livenessProbe:
          #   httpGet:
          #     path: /manifest.json
          #     port: http
          #   timeoutSeconds: 15

          volumeMounts:
            - name: data
              mountPath: /app/data
              subPath: data
      volumes:
        - name: data
          # node-local storage — this is what ties the pod to the node
          # selected below
          hostPath:
            path: /opt/docker/uptime-kuma

      # tolerations:
      #   - key: kriese.eu/hosted
      #     operator: Exists

      # pin to the node that holds the hostPath data
      nodeSelector:
        # topology.kubernetes.io/region: ovh
        topology.kubernetes.io/region: md-hq
        kubernetes.io/hostname: docker-worker1

---
# Service: exposes uptime-kuma in-cluster on port 80, forwarding to the
# container port named "http" (3001) on the Deployment's pods.
apiVersion: v1
kind: Service
metadata:
  name: uptime-kuma
  labels:
    app: uptime-kuma
spec:
  ports:
    - protocol: TCP
      port: 80
      # resolves by name to containerPort 3001 declared on the Deployment
      targetPort: http
      name: http
  selector:
    app: uptime-kuma
  # request dual-stack where the cluster supports it; degrades gracefully
  # to single-stack otherwise
  ipFamilyPolicy: PreferDualStack
  ipFamilies:
    - IPv4
    - IPv6

---
# internal only
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: uptime-kuma
  annotations:
    # select the traefik controller explicitly, matching uptime-kuma-public
    # (previously this Ingress relied on the cluster's implicit default class)
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
  rules:
    - host: 'kuma.test.com'
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: uptime-kuma
                port:
                  name: http
    # NOTE(review): this host is also routed by the uptime-kuma-public
    # Ingress — confirm defining it in both places is intentional
    - host: 'status.test.com'
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: uptime-kuma
                port:
                  name: http

---
# public
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: uptime-kuma-public
  annotations:
    # NOTE(review): this annotation is deprecated in favor of
    # spec.ingressClassName, but the field reportedly doesn't work in this
    # setup (see the comment in spec below) — keep the annotation for now
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
  # ingressClassName: traefik # doesn't work
  rules:
    - host: 'status.test.com'
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: uptime-kuma
                port:
                  name: http


Kustomization

# Kustomization for the status namespace: retags the upstream uptime-kuma
# image reference to the pinned mirror build.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: status

images:
  - name: louislam/uptime-kuma
    # the images transformer expects the tag in newTag, not embedded in
    # newName; quoted so the version string is never parsed as a number
    newName: ghcr.io/visualon/uptime-kuma
    newTag: "1.23.15"

resources:
  - uptime-kuma.yaml

Flux ```yaml apiVersion: apps/v1 kind: Deployment metadata: name: uptime-kuma spec: selector: matchLabels: app: uptime-kuma replicas: 1 template: metadata: labels: app: uptime-kuma spec: automountServiceAccountToken: false containers: - name: main image: louislam/uptime-kuma ports: - containerPort: 3001 name: http resources: requests: memory: 32Mi cpu: 50m limits: memory: 1Gi cpu: 500m startupProbe: httpGet: path: /manifest.json port: http initialDelaySeconds: 5 timeoutSeconds: 5 failureThreshold: 15 # https://github.com/louislam/uptime-kuma/issues/4500 # livenessProbe: # httpGet: # path: /manifest.json # port: http # timeoutSeconds: 15 volumeMounts: - name: data mountPath: /app/data subPath: data volumes: - name: data hostPath: path: /opt/docker/uptime-kuma # tolerations: # - key: kriese.eu/hosted # operator: Exists nodeSelector: # topology.kubernetes.io/region: ovh topology.kubernetes.io/region: md-hq kubernetes.io/hostname: docker-worker1 --- apiVersion: v1 kind: Service metadata: name: uptime-kuma labels: app: uptime-kuma spec: ports: - protocol: TCP port: 80 targetPort: http name: http selector: app: uptime-kuma ipFamilyPolicy: PreferDualStack ipFamilies: - IPv4 - IPv6 --- # internal only apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: uptime-kuma annotations: traefik.ingress.kubernetes.io/router.entrypoints: websecure spec: rules: - host: 'kuma.test.com' http: paths: - path: / pathType: Prefix backend: service: name: uptime-kuma port: name: http - host: 'status.test.com' http: paths: - path: / pathType: Prefix backend: service: name: uptime-kuma port: name: http --- # public apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: uptime-kuma-public annotations: kubernetes.io/ingress.class: traefik traefik.ingress.kubernetes.io/router.entrypoints: websecure spec: # ingressClassName: traefik # doesn't work rules: - host: 'status.test.com' http: paths: - path: / pathType: Prefix backend: service: name: uptime-kuma port: name: http ``` 
Kustomization ```yaml apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization namespace: status images: - name: louislam/uptime-kuma newName: ghcr.io/visualon/uptime-kuma:1.23.15 resources: - uptime-kuma.yaml ```
unofficial kuma helm chart - https://artifacthub.io/packages/helm/uptime-kuma/uptime-kuma - https://github.com/dirsigler/uptime-kuma-helm/tree/main/charts/uptime-kuma

Uptime Kuma is deliberately hosted on a small virtual machine far away from all other infrastructure so that it can remain active and alert us in the event of a catastrophic failure that takes down the entire network.

Uptime Kuma is deliberately hosted on a small virtual machine far away from all other infrastructure so that it can remain active and alert us in the event of a catastrophic failure that takes down the entire network.

The idea would be for kuma to be set up so that it is HA between Finland and Germany and not subject to the failure of a single hosting / network. Your host has been exceptionally stable and has the necessary quality of not being on the same host / network as well, so it is not really a requirement.

But ....

This is exactly the setup that's needed for data.forgejo.org as well and having kuma to experiment with is less of a challenge than doing it directly for data.forgejo.org. That's part of the incentive.

What do you think?

The idea would be for kuma to be set up so that it is HA between Finland and Germany and not subject to the failure of a single hosting / network. Your host has been exceptionally stable and has the necessary quality of not being on the same host / network as well, so it is not really a requirement. But .... This is exactly the setup that's needed for data.forgejo.org as well and having kuma to experiment with is less of a challenge than doing it directly for data.forgejo.org. That's part of the incentive. What do you think?
Sign in to join this conversation.
No milestone
No project
No assignees
1 participant
Notifications
Due date
The due date is invalid or out of range. Please use the format "yyyy-mm-dd".

No due date set.

Dependencies

No dependencies set.

Reference
infrastructure/k8s-cluster#10
No description provided.