update qbitty, sonarr, sab

Matt Reeves 2025-02-18 22:19:44 -05:00
parent 928e59767e
commit 32f645f51f
12 changed files with 293 additions and 235 deletions

@@ -1,53 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: prowlarr
-  namespace: arr
-  labels:
-    app: prowlarr
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: prowlarr
-  template:
-    metadata:
-      labels:
-        app: prowlarr
-    spec:
-      securityContext:
-        runAsUser: 65534
-        runAsGroup: 65534
-        fsGroup: 65534
-        fsGroupChangePolicy: OnRootMismatch
-      containers:
-        - name: prowlarr
-          image: ghcr.io/onedr0p/prowlarr:rolling@sha256:7234ae8ca5b14153baddf42257cc2ddc928695ce604d11a9616b635eca0e43e7
-          imagePullPolicy: IfNotPresent
-          resources:
-            requests:
-              memory: 512Mi
-              cpu: 150m
-            limits:
-              memory: 2Gi
-              cpu: 500m
-          volumeMounts:
-            - mountPath: /config
-              name: prowlarr-config
-      volumes:
-        - name: prowlarr-config
-          persistentVolumeClaim:
-            claimName: prowlarr-config
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: prowlarr-config
-  namespace: arr
-spec:
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: longhorn

@@ -0,0 +1,123 @@
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+  name: &app prowlarr
+  namespace: arr
+spec:
+  interval: 15m
+  chart:
+    spec:
+      chart: app-template
+      version: 3.7.1
+      interval: 30m
+      sourceRef:
+        kind: HelmRepository
+        name: bjw-s
+        namespace: flux-system
+  install:
+    remediation:
+      retries: 3
+  upgrade:
+    remediation:
+      retries: 3
+  values:
+    global:
+      fullnameOverride: *app
+      namespace: arr
+    controllers:
+      prowlarr:
+        enabled: true
+        type: statefulset
+        annotations:
+          reloader.stakater.com/auto: "true"
+        replicas: 1
+        statefulset:
+          volumeClaimTemplates:
+            - name: prowlarr-config
+              accessMode: ReadWriteOnce
+              size: 3Gi
+              storageClass: longhorn
+              globalMounts:
+                - path: /config
+        pod:
+          securityContext:
+            runAsUser: 1000
+            runAsGroup: &group 1000
+            fsGroup: *group
+            fsGroupChangePolicy: "OnRootMismatch"
+          dnsPolicy: None
+          dnsConfig:
+            nameservers:
+              - 1.1.1.1
+              - 8.8.8.8
+        containers:
+          app:
+            image:
+              repository: ghcr.io/onedr0p/prowlarr
+              tag: 1.30.2.4939
+              pullPolicy: IfNotPresent
+            env:
+              TZ: "${TZ}"
+              PROWLARR__INSTANCE_NAME: *app
+              PROWLARR__PORT: &port 7878
+              PROWLARR__APPLICATION_URL: "https://prowlarr.${LOCAL_DOMAIN}"
+              PROWLARR__THEME: dark
+              PROWLARR__LOG_LEVEL: info
+            probes:
+              liveness:
+                enabled: false
+            securityContext:
+              allowPrivilegeEscalation: false
+              capabilities:
+                drop:
+                  - ALL
+            resources:
+              requests:
+                cpu: 50m
+                memory: 150Mi
+              limits:
+                memory: 512Mi
+    service:
+      app:
+        primary: true
+        controller: prowlarr
+        ports:
+          http:
+            port: *port
+    ingress:
+      internal:
+        enabled: true
+        className: nginx
+        hosts:
+          - host: "prowlarr.${LOCAL_DOMAIN}"
+            paths:
+              - path: /
+                pathType: Prefix
+                service:
+                  identifier: app
+                  port: http
+        tls:
+          - hosts:
+              - "prowlarr.${LOCAL_DOMAIN}"
+            secretName: local-mafyuh-dev-production-tls
+    persistence:
+      data:
+        enabled: true
+        type: nfs
+        server: "${NAS_IP}"
+        path: /mnt/thePool/thePoolShare
+        globalMounts:
+          - path: /data

@@ -1,22 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: prowlarr
-  namespace: arr
-spec:
-  ingressClassName: nginx
-  rules:
-    - host: "prowlarr.local.mafyuh.dev"
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: headless-prowlarr
-                port:
-                  number: 9696
-  tls:
-    - hosts:
-        - "prowlarr.local.mafyuh.dev"
-      secretName: local-mafyuh-dev-production-tls

@@ -1,6 +1,4 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
-  - deployment.yaml
-  - service.yaml
-  - ingress.yaml
+  - helmrelease.yaml

@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: headless-prowlarr
-  namespace: arr
-spec:
-  selector:
-    app: prowlarr
-  ports:
-    - port: 9696
-      targetPort: 9696
-      protocol: TCP
-  type: ClusterIP

@@ -1,99 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: qbitty
-  namespace: arr
-  labels:
-    app: qbitty
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: qbitty
-  template:
-    metadata:
-      labels:
-        app: qbitty
-    spec:
-      containers:
-        - name: qbitty
-          image: ghcr.io/hotio/qbittorrent@sha256:43312cb59ec3054d99848481f0913336275b7afa18ef814d2091e0b87509fc23
-          imagePullPolicy: IfNotPresent
-          env:
-            - name: VPN_ENABLED
-              value: "true"
-            - name: VPN_CONF
-              value: "wg0"
-            - name: VPN_PROVIDER
-              value: "proton"
-            - name: VPN_KEEP_LOCAL_DNS
-              value: "false"
-            - name: VPN_AUTO_PORT_FORWARD
-              value: "true"
-            - name: VPN_LAN_NETWORK
-              valueFrom:
-                secretKeyRef:
-                  name: lan-network
-                  key: lan-network
-            - name: VPN_LAN_LEAK_ENABLED
-              value: "false"
-            - name: VPN_FIREWALL_TYPE
-              value: "auto"
-            - name: PRIVOXY_ENABLED
-              value: "false"
-            - name: WEBUI_PORT
-              value: "8080"
-            - name: VPN_HEALTHCHECK_ENABLED
-              value: "false"
-            - name: UNBOUND_ENABLED
-              value: "false"
-          resources:
-            requests:
-              memory: "256Mi"
-              cpu: "100m"
-            limits:
-              memory: "1Gi"
-              cpu: "5000m"
-          volumeMounts:
-            - mountPath: /config
-              name: qbitty-conf
-            - mountPath: /data
-              name: nas
-            - mountPath: /config/wireguard/
-              name: wireguard-config
-            - mountPath: /incomplete
-              name: qbitty-incomplete
-          securityContext:
-            capabilities:
-              add: ["NET_ADMIN"]
-          ports:
-            - containerPort: 8080
-              name: webui
-              protocol: TCP
-      volumes:
-        - name: nas
-          nfs:
-            path: /mnt/thePool/thePoolShare
-            server: 10.0.0.10
-        - name: qbitty-conf
-          persistentVolumeClaim:
-            claimName: qbitty-conf
-        - name: wireguard-config
-          secret:
-            secretName: qbitty-wireguard
-        - name: qbitty-incomplete
-          emptyDir:
-            sizeLimit: 100Gi
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: qbitty-conf
-  namespace: arr
-spec:
-  accessModes:
-    - ReadWriteMany
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: longhorn

@@ -0,0 +1,143 @@
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+  name: &app qbitty
+  namespace: arr
+spec:
+  interval: 15m
+  chart:
+    spec:
+      chart: app-template
+      version: 3.7.1
+      interval: 30m
+      sourceRef:
+        kind: HelmRepository
+        name: bjw-s
+        namespace: flux-system
+  install:
+    remediation:
+      retries: 3
+  upgrade:
+    remediation:
+      retries: 3
+  values:
+    global:
+      fullnameOverride: *app
+      namespace: arr
+    controllers:
+      qbitty:
+        enabled: true
+        type: statefulset
+        annotations:
+          reloader.stakater.com/auto: "true"
+        replicas: 1
+        statefulset:
+          volumeClaimTemplates:
+            - name: qbitty-config
+              accessMode: ReadWriteOnce
+              size: 500Mi
+              storageClass: longhorn
+              globalMounts:
+                - path: /config
+        pod:
+          securityContext:
+            runAsUser: 1000
+            runAsGroup: &group 1000
+            fsGroup: *group
+            fsGroupChangePolicy: "OnRootMismatch"
+        containers:
+          app:
+            image:
+              repository: ghcr.io/hotio/qbittorrent
+              digest: "sha256:d97080a8a978d7705297dc44bcd6c599b3b47631fec8dcfc0cb7039279d05b02"
+              pullPolicy: IfNotPresent
+            env:
+              TZ: "${TZ}"
+              WEBUI_PORT: &port 8080
+              VPN_ENABLED: "true"
+              VPN_CONF: "wg0"
+              VPN_PROVIDER: "proton"
+              VPN_KEEP_LOCAL_DNS: "false"
+              VPN_AUTO_PORT_FORWARD: "true"
+              VPN_LAN_NETWORK:
+                valueFrom:
+                  secretKeyRef:
+                    name: lan-network
+                    key: lan-network
+              VPN_LAN_LEAK_ENABLED: "false"
+              VPN_FIREWALL_TYPE: "auto"
+              PRIVOXY_ENABLED: "false"
+              VPN_HEALTHCHECK_ENABLED: "false"
+              UNBOUND_ENABLED: "false"
+            probes:
+              liveness:
+                enabled: false
+            securityContext:
+              capabilities:
+                add:
+                  - NET_ADMIN
+            resources:
+              requests:
+                cpu: 20m
+                memory: 200Mi
+              limits:
+                memory: 4000Mi
+    service:
+      app:
+        primary: true
+        controller: qbitty
+        ports:
+          http:
+            port: *port
+    ingress:
+      internal:
+        enabled: true
+        className: nginx
+        hosts:
+          - host: "qbitty.${LOCAL_DOMAIN}"
+            paths:
+              - path: /
+                pathType: Prefix
+                service:
+                  identifier: app
+                  port: http
+        tls:
+          - hosts:
+              - "qbitty.${LOCAL_DOMAIN}"
+            secretName: local-mafyuh-dev-production-tls
+    persistence:
+      data:
+        enabled: true
+        type: nfs
+        server: "${NAS_IP}"
+        path: /mnt/thePool/thePoolShare
+        globalMounts:
+          - path: /data
+      incomplete:
+        enabled: true
+        type: emptyDir
+        sizeLimit: 100Gi
+        globalMounts:
+          - path: /incomplete
+      wireguard-config:
+        enabled: true
+        type: secret
+        name: qbitty-wireguard
+        defaultMode: 0400
+        globalMounts:
+          - path: /config/wireguard/

@@ -1,22 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: qbitty
-  namespace: arr
-spec:
-  ingressClassName: nginx
-  rules:
-    - host: "qbitty.local.mafyuh.dev"
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: qbitty
-                port:
-                  number: 8080
-  tls:
-    - hosts:
-        - "qbitty.local.mafyuh.dev"
-      secretName: local-mafyuh-dev-production-tls

@@ -1,6 +1,4 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
-  - deployment.yaml
-  - service.yaml
-  - ingress.yaml
+  - helmrelease.yaml

@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: qbitty
-  namespace: arr
-spec:
-  selector:
-    app: qbitty
-  ports:
-    - port: 8080
-      targetPort: 8080
-      protocol: TCP
-  type: ClusterIP

@@ -38,8 +38,8 @@ spec:
         statefulset:
           volumeClaimTemplates:
-            - name: config
-              accessMode: ReadWriteMany
+            - name: sabnzbd-config
+              accessMode: ReadWriteOnce
               size: 500Mi
               storageClass: longhorn
               globalMounts:
@@ -47,8 +47,8 @@ spec:
         pod:
           securityContext:
-            runAsUser: 65534
-            runAsGroup: &group 65534
+            runAsUser: 1000
+            runAsGroup: &group 1000
             fsGroup: *group
             fsGroupChangePolicy: "OnRootMismatch"
@@ -61,6 +61,12 @@ spec:
             env:
               TZ: "${TZ}"
               SABNZBD__PORT: &port 8080
+              SABNZBD__HOST_WHITELIST_ENTRIES: >-
+                {{ .Release.Name }},
+                {{ .Release.Name }}.arr,
+                {{ .Release.Name }}.arr.svc,
+                {{ .Release.Name }}.arr.svc.cluster.local,
+                sab.${LOCAL_DOMAIN}
             probes:
               liveness:
@@ -92,7 +98,7 @@ spec:
         enabled: true
         className: nginx
         hosts:
-          - host: "sabnzbd.${LOCAL_DOMAIN}"
+          - host: "sab.${LOCAL_DOMAIN}"
             paths:
               - path: /
                 pathType: Prefix
@@ -101,7 +107,7 @@ spec:
                   port: http
         tls:
           - hosts:
-              - "sabnzbd.${LOCAL_DOMAIN}"
+              - "sab.${LOCAL_DOMAIN}"
             secretName: local-mafyuh-dev-production-tls
     persistence:
@@ -111,4 +117,11 @@ spec:
         server: "${NAS_IP}"
         path: /mnt/thePool/thePoolShare
         globalMounts:
-          - path: /data
+          - path: /data
+      incomplete:
+        enabled: true
+        type: emptyDir
+        sizeLimit: 100Gi
+        globalMounts:
+          - path: /incomplete

@@ -51,6 +51,11 @@ spec:
             runAsGroup: &group 1000
             fsGroup: *group
             fsGroupChangePolicy: "OnRootMismatch"
+          dnsPolicy: None
+          dnsConfig:
+            nameservers:
+              - 1.1.1.1
+              - 8.8.8.8
         containers:
           app: