feat: apply new helm structure

use MinIO S3 for the savepoint and checkpoint paths
separate the task-manager, job-manager and operator into their own workloads
use a StatefulSet for the task-manager to handle replication
support basic credentials for the download-jar request
update to Flink 1.20.1
This commit is contained in:
2025-04-05 01:39:02 +03:30
parent 7f78faeed7
commit 830e265162
26 changed files with 386 additions and 256 deletions

6
helm/chart/Chart.lock Normal file
View File

@@ -0,0 +1,6 @@
dependencies:
- name: minio
repository: https://charts.bitnami.com/bitnami
version: 16.0.2
digest: sha256:9a822e9c5a4eee1b6515c143150c1dd6f84ceb080a7be4573e09396c5916f7d3
generated: "2025-04-04T14:42:09.771390014+03:30"

View File

@@ -4,3 +4,7 @@ description: Helm chart for flink kube operator
type: application
version: 0.1.14
appVersion: "0.1.0"
dependencies:
- name: minio
repository: https://charts.bitnami.com/bitnami
version: 16.0.2

Binary file not shown.

View File

@@ -17,6 +17,6 @@
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "flink-kube-operator.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
echo "Visit http://127.0.0.1:8081 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8081:$CONTAINER_PORT
{{- end }}

View File

@@ -0,0 +1,33 @@
{{- /*
flink.env — container environment shared by the JobManager and TaskManager
pods. FLINK_PROPERTIES is appended to the Flink configuration by the image's
docker-entrypoint, so every line inside it must be a valid Flink option.
*/}}
{{- define "flink.env" -}}
- name: JOB_MANAGER_RPC_ADDRESS
  # JobManager and TaskManagers now run in separate pods, so "localhost" is
  # wrong everywhere except the JobManager itself — use the JM service name.
  value: "{{ .Release.Name }}-flink-job-manager"
- name: NAMESPACE
  value: {{ .Release.Namespace }}
# NOTE: the former rootLogger.* lines were log4j settings, not Flink options;
# they are invalid in this file and have been removed. Configure logging via
# the log4j properties files in the Flink image instead.
- name: FLINK_PROPERTIES
  value: |
    jobmanager.rpc.address: {{ .Release.Name }}-flink-job-manager
    jobmanager.memory.process.size: {{ .Values.flink.jobManager.processMemory }}
    taskmanager.memory.process.size: {{ .Values.flink.taskManager.processMemory }}
    taskmanager.data.port: 6125
    taskmanager.numberOfTaskSlots: {{ .Values.flink.taskManager.numberOfTaskSlots }}
    parallelism.default: {{ .Values.flink.parallelism.default }}
    state.backend: {{ .Values.flink.state.backend }}
    rest.port: 8081
    high-availability.type: kubernetes
    kubernetes.namespace: {{ .Release.Namespace }}
    kubernetes.cluster-id: {{ .Values.clusterId | default (print .Release.Name "-cluster") }}
    execution.checkpointing.interval: {{ .Values.flink.checkpoint.interval }}
    execution.checkpointing.mode: {{ .Values.flink.checkpoint.mode }}
    # An s3:// URI is "s3://<bucket>/<path>" only — the endpoint (host:port)
    # is supplied via s3.endpoint below. The previous value embedded
    # "<host>:9000" here, which S3 would read as an (invalid) bucket name.
    # The bucket defaults to "flink"; override with .Values.flink.s3.bucket.
    state.checkpoints.dir: s3://{{ dig "s3" "bucket" "flink" .Values.flink }}/checkpoints
    state.savepoints.dir: s3://{{ dig "s3" "bucket" "flink" .Values.flink }}/savepoints
    state.backend.rocksdb.localdir: /opt/flink/rocksdb
    # NOTE(review): a pod-local HA storageDir only works with a single
    # JobManager; for real HA this should point at durable shared storage
    # (e.g. s3://) — confirm.
    high-availability.storageDir: /opt/flink/ha
    state.backend.incremental: {{ .Values.flink.state.incremental }}
    rest.profiling.enabled: true
    s3.endpoint: http://{{ .Release.Name }}-minio:9000 # Use Kubernetes service name
    s3.path.style.access: true
    s3.fs.hadoop.impl: org.apache.hadoop.fs.s3a.S3AFileSystem # Keep for compatibility
    fs.s3a.aws.credentials.provider: com.amazonaws.auth.DefaultAWSCredentialsProviderChain
{{- end }}

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Values.flink.state.data.pvcName }}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.flink.state.data.size }} # Use size defined in values.yaml

View File

@@ -1,165 +0,0 @@
{{- define "flink.env" -}}
- name: JOB_MANAGER_RPC_ADDRESS
value: "localhost"
- name: NAMESPACE
value: {{ .Release.Namespace }}
- name: FLINK_PROPERTIES
value: |
jobmanager.rpc.address: localhost
jobmanager.memory.process.size: {{ .Values.flink.jobManager.processMemory }}
taskmanager.memory.process.size: {{ .Values.flink.taskManager.processMemory }}
taskmanager.data.port: 6125
taskmanager.numberOfTaskSlots: {{ .Values.flink.taskManager.numberOfTaskSlots }}
parallelism.default: {{ .Values.flink.parallelism.default }}
state.backend: {{ .Values.flink.state.backend }}
rest.port: 8081
rootLogger.level = DEBUG
rootLogger.appenderRef.console.ref = ConsoleAppender
high-availability.type: kubernetes
kubernetes.namespace: {{ .Release.Namespace }}
kubernetes.cluster-id: {{ .Values.clusterId | default (print .Release.Name "-cluster") }}
execution.checkpointing.interval: {{ .Values.flink.checkpoint.interval }}
execution.checkpointing.mode: {{ .Values.flink.checkpoint.mode }}
web.upload.dir: {{ .Values.flink.state.data.dir }}/web-upload
state.checkpoints.dir: file://{{ .Values.flink.state.data.dir }}/checkpoints
state.backend.rocksdb.localdir: file://{{ .Values.flink.state.data.dir }}/rocksdb
high-availability.storageDir: file://{{ .Values.flink.state.ha.dir }}
state.savepoints.dir: file://{{ .Values.flink.state.savepoints.dir }}
state.backend.incremental: {{ .Values.flink.state.incremental }}
rest.profiling.enabled: true
{{- end }}
{{- define "flink.volumeMounts" -}}
- name: flink-data
mountPath: {{ .Values.flink.state.data.dir }}/data
- name: flink-data
mountPath: {{ .Values.flink.state.data.dir }}/rocksdb
subPath: rocksdb
- name: flink-data
mountPath: {{ .Values.flink.state.data.dir }}/checkpoints
subPath: checkpoints
- name: flink-data
mountPath: {{ .Values.flink.state.data.dir }}/web-upload
subPath: web-upload
- name: flink-ha
mountPath: {{ .Values.flink.state.ha.dir }}
- name: flink-savepoints
mountPath: {{ .Values.flink.state.savepoints.dir }}
{{- end }}
{{- define "flink.volumes" -}}
- name: flink-data
persistentVolumeClaim:
claimName: {{ .Values.flink.state.data.pvcName }}
- name: flink-savepoints
persistentVolumeClaim:
claimName: {{ .Values.flink.state.savepoints.pvcName }}
- name: flink-ha
persistentVolumeClaim:
claimName: {{ .Values.flink.state.ha.pvcName }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-flink
labels:
app.kubernetes.io/name: {{ .Release.Name }}-flink
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: {{ .Release.Name }}-flink
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ .Release.Name }}-flink
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: {{ include "flink-kube-operator.serviceAccountName" . }}
initContainers:
- name: volume-mount-hack
image: {{ .Values.flink.image.repository }}:{{ .Values.flink.image.tag }}
runAsUser: 0
command: ["sh", "-c", "chown -R flink {{ .Values.flink.state.data.dir }}/data {{ .Values.flink.state.data.dir }}/rocksdb {{ .Values.flink.state.data.dir }}/checkpoints {{ .Values.flink.state.data.dir }}/web-upload {{ .Values.flink.state.ha.dir }} {{ .Values.flink.state.savepoints.dir }}"]
volumeMounts:
{{- include "flink.volumeMounts" . | nindent 12 }}
containers:
- name: jobmanager
image: {{ .Values.flink.image.repository }}:{{ .Values.flink.image.tag }}
imagePullPolicy: Always
args: ["jobmanager"]
ports:
- containerPort: 6123 # JobManager RPC port
name: rpc
- containerPort: 6124 # JobManager blob server port
name: blob
- containerPort: 6125 # JobManager queryable state port
name: query
- containerPort: 8081 # JobManager Web UI port
name: ui
env:
{{- include "flink.env" . | nindent 12 }}
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
{{- include "flink.volumeMounts" . | nindent 12 }}
- name: taskmanager
image: {{ .Values.flink.image.repository }}:{{ .Values.flink.image.tag }}
imagePullPolicy: Always
args: ["taskmanager"]
ports:
- containerPort: 6121 # TaskManager data port
name: data
- containerPort: 6122 # TaskManager RPC port
name: rpc
env:
{{- include "flink.env" . | nindent 12 }}
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
{{- include "flink.volumeMounts" . | nindent 12 }}
- name: operator
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
env:
- name: FLINK_API_URL
value: localhost:8081
- name: SAVEPOINT_PATH
value: file://{{ .Values.flink.state.savepoints.dir }}
- name: NAMESPACE
value: "{{ .Release.Namespace }}"
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
{{- include "flink.volumeMounts" . | nindent 12 }}
volumes:
{{- include "flink.volumes" . | nindent 8 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Values.flink.state.ha.pvcName }}
name: {{ .Release.Name }}-{{ .Values.flink.state.ha.pvcName }}
spec:
accessModes:
- ReadWriteOnce

View File

@@ -0,0 +1,84 @@
# JobManager Deployment: a single Flink JobManager with Kubernetes HA state
# on a PVC and checkpoints/savepoints in the bundled MinIO (see flink.env).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-flink-job-manager
  labels:
    app: {{ .Release.Name }}-flink-operator
    component: {{ .Release.Name }}-flink-job-manager
spec:
  replicas: 1
  strategy:
    # Recreate prevents two JobManagers from briefly coexisting and racing
    # for the same Kubernetes HA lease during an upgrade.
    type: Recreate
  selector:
    matchLabels:
      app: {{ .Release.Name }}-flink-operator
      component: {{ .Release.Name }}-flink-job-manager
  template:
    metadata:
      labels:
        app: {{ .Release.Name }}-flink-operator
        component: {{ .Release.Name }}-flink-job-manager
    spec:
      serviceAccountName: {{ include "flink-kube-operator.serviceAccountName" . }}
      initContainers:
        # The HA PVC is mounted root-owned while Flink runs as user "flink";
        # fix ownership before the JobManager starts.
        - name: volume-mount-hack
          image: {{ .Values.flink.image.repository }}:{{ .Values.flink.image.tag }}
          securityContext:
            # BUG FIX: runAsUser is only valid inside securityContext — it is
            # not a container-level field, so the previous manifest was
            # rejected by the API server.
            runAsUser: 0
          command: ["sh", "-c", "chown -R flink {{ .Values.flink.state.ha.dir }}"]
          volumeMounts:
            - name: flink-ha
              mountPath: {{ .Values.flink.state.ha.dir }}
      containers:
        - name: jobmanager
          image: {{ .Values.flink.image.repository }}:{{ .Values.flink.image.tag }}
          imagePullPolicy: Always
          args: ["jobmanager"]
          ports:
            - containerPort: 6123  # JobManager RPC port
              name: rpc
            - containerPort: 6124  # JobManager blob server port
              name: blob
            - containerPort: 6125  # JobManager queryable state port
              name: query
            - containerPort: 8081  # JobManager Web UI / REST port
              name: ui
          env:
            {{- include "flink.env" . | nindent 12 }}
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: S3_ENDPOINT
              # BUG FIX: must match the MinIO subchart's release-scoped
              # service (as used by s3.endpoint in flink.env); the previous
              # hard-coded "minio-service" does not exist in this chart.
              value: "http://{{ .Release.Name }}-minio:9000"
            # Credentials come from the Bitnami MinIO subchart's generated
            # secret — same source the operator StatefulSet uses. The former
            # "{{ .Release.Name }}-flink-secrets" secret is not created by
            # this chart.
            - name: AWS_ACCESS_KEY_ID
              valueFrom:
                secretKeyRef:
                  name: {{ .Release.Name }}-minio
                  key: root-user
            - name: AWS_SECRET_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: {{ .Release.Name }}-minio
                  key: root-password
          volumeMounts:
            - name: flink-ha
              mountPath: {{ .Values.flink.state.ha.dir }}
      volumes:
        - name: flink-ha
          persistentVolumeClaim:
            claimName: {{ .Release.Name }}-{{ .Values.flink.state.ha.pvcName }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

View File

@@ -0,0 +1,28 @@
# ClusterIP Service exposing the JobManager's REST/UI, RPC, blob-server and
# queryable-state ports to the rest of the cluster (TaskManagers register via
# rpc/blob; the operator talks to the REST port).
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}-flink-job-manager
  labels:
    app.kubernetes.io/name: {{ .Release.Name }}-flink-job-manager
    app.kubernetes.io/instance: {{ .Release.Name }}
spec:
  ports:
    - name: flink-web-ui
      port: 8081
      targetPort: 8081
    - name: rpc
      port: 6123
      targetPort: 6123
    - name: blob
      port: 6124
      targetPort: 6124
    - name: query
      port: 6125
      targetPort: 6125
    # NOTE(review): port 3000 is the operator's port, but this selector
    # matches JobManager pods, which no longer run the operator container
    # after the split — confirm whether this entry still belongs here.
    - name: operator
      port: 3000
      targetPort: 3000
  selector:
    app: {{ .Release.Name }}-flink-operator
    component: {{ .Release.Name }}-flink-job-manager
  type: ClusterIP # Change to LoadBalancer if you want external access

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Values.flink.state.savepoints.pvcName }}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.flink.state.savepoints.size }} # Use size defined in values.yaml

View File

@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: flink
labels:
app.kubernetes.io/name: {{ .Release.Name }}-flink
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
ports:
- port: 8081
name: flink-web-ui
targetPort: 8081
- port: 3000
name: operator
targetPort: 3000
selector:
app.kubernetes.io/name: {{ .Release.Name }}-flink
app.kubernetes.io/instance: {{ .Release.Name }}
type: ClusterIP # Change to LoadBalancer if you want external access

View File

@@ -0,0 +1,58 @@
# TaskManager StatefulSet: replicated TaskManagers, each with its own
# per-replica RocksDB scratch volume (volumeClaimTemplates).
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ .Release.Name }}-flink-task-manager
  labels:
    app: {{ .Release.Name }}-flink-operator
    # CONSISTENCY FIX: match the component value used by the selector and
    # pod template below (was the bare string "taskmanager").
    component: {{ .Release.Name }}-flink-task-manager
spec:
  # NOTE(review): serviceName must reference an existing (normally headless)
  # Service named "{{ .Release.Name }}-flink-task-manager" — confirm the
  # chart defines one.
  serviceName: {{ .Release.Name }}-flink-task-manager
  replicas: {{ .Values.flink.taskManager.replicas }}
  selector:
    matchLabels:
      app: {{ .Release.Name }}-flink-operator
      component: {{ .Release.Name }}-flink-task-manager
  template:
    metadata:
      labels:
        app: {{ .Release.Name }}-flink-operator
        component: {{ .Release.Name }}-flink-task-manager
    spec:
      serviceAccountName: {{ include "flink-kube-operator.serviceAccountName" . }}
      containers:
        - name: task-manager
          image: {{ .Values.flink.image.repository }}:{{ .Values.flink.image.tag }}
          imagePullPolicy: Always
          args: ["taskmanager"]
          env:
            {{- include "flink.env" . | nindent 12 }}
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: S3_ENDPOINT
              # BUG FIX: use the release-scoped MinIO subchart service; the
              # previous hard-coded "minio-service" does not exist.
              value: "http://{{ .Release.Name }}-minio:9000"
            # Credentials from the Bitnami MinIO subchart's generated secret,
            # consistent with the JobManager and operator workloads (the
            # former "-flink-secrets" secret is not created by this chart).
            - name: AWS_ACCESS_KEY_ID
              valueFrom:
                secretKeyRef:
                  name: {{ .Release.Name }}-minio
                  key: root-user
            - name: AWS_SECRET_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: {{ .Release.Name }}-minio
                  key: root-password
          volumeMounts:
            # Matches state.backend.rocksdb.localdir in flink.env.
            - name: rocksdb-storage
              mountPath: /opt/flink/rocksdb
          resources:
            {{- toYaml .Values.flink.taskManager.resources | nindent 12 }}
  volumeClaimTemplates:
    - metadata:
        name: rocksdb-storage
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: {{ .Values.flink.taskManager.storage.rocksDb.size }}

View File

@@ -1,9 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "flink-kube-operator.fullname" . }}
name: {{ .Release.Name }}-flink-operator
labels:
{{- include "flink-kube-operator.labels" . | nindent 4 }}
app: {{ .Release.Name }}-flink-operator
component: {{ .Release.Name }}-flink-operator
spec:
type: {{ .Values.service.type }}
ports:
@@ -12,4 +13,5 @@ spec:
protocol: TCP
name: http
selector:
{{- include "flink-kube-operator.selectorLabels" . | nindent 4 }}
app: {{ .Release.Name }}-flink-operator
component: {{ .Release.Name }}-flink-operator

View File

@@ -0,0 +1,66 @@
# Operator StatefulSet: runs the flink-kube-operator as a single replica with
# a stable network identity, waiting for the JobManager REST API before start.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ .Release.Name }}-flink-operator
  labels:
    app: {{ .Release.Name }}-flink-operator
    component: {{ .Release.Name }}-flink-operator
spec:
  serviceName: {{ .Release.Name }}-flink-operator
  replicas: 1
  selector:
    matchLabels:
      app: {{ .Release.Name }}-flink-operator
      component: {{ .Release.Name }}-flink-operator
  template:
    metadata:
      labels:
        app: {{ .Release.Name }}-flink-operator
        component: {{ .Release.Name }}-flink-operator
    spec:
      serviceAccountName: {{ include "flink-kube-operator.serviceAccountName" . }}
      initContainers:
        # Block operator start-up until the JobManager REST endpoint answers,
        # so the operator does not crash-loop on connection errors at install.
        - name: wait-for-jobmanager
          image: curlimages/curl:8.5.0 # Lightweight curl image
          command:
            - sh
            - -c
            - |
              echo "Waiting for Flink JobManager to be ready..."
              until curl -sSf "http://{{ .Release.Name }}-flink-job-manager:8081/taskmanagers"; do
                echo "JobManager not ready yet - retrying in 5s..."
                sleep 5
              done
              echo "JobManager is ready!"
      containers:
        - name: operator
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          env:
            # REST endpoint of the JobManager service defined in this chart.
            - name: FLINK_API_URL
              value: {{ .Release.Name }}-flink-job-manager:8081
            # NOTE(review): an s3:// URI should be "s3://<bucket>/<path>" —
            # the "<host>:9000" embedded here would be read as the bucket
            # name. This probably should be s3://<bucket>/savepoints with the
            # endpoint supplied via S3_ENDPOINT below; confirm against the
            # checkpoint/savepoint paths in flink.env.
            - name: SAVEPOINT_PATH
              value: s3://{{ .Release.Name }}-minio:9000/savepoints
            - name: NAMESPACE
              value: "{{ .Release.Namespace }}"
            - name: S3_ENDPOINT
              value: "http://{{ .Release.Name }}-minio:9000"
            # Credentials from the Bitnami MinIO subchart's generated secret.
            - name: AWS_ACCESS_KEY_ID
              valueFrom:
                secretKeyRef:
                  name: {{ .Release.Name }}-minio
                  key: root-user
            - name: AWS_SECRET_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: {{ .Release.Name }}-minio
                  key: root-password

View File

@@ -117,7 +117,7 @@ affinity: {}
flink:
image:
repository: lcr.logicamp.tech/library/flink
tag: 1.20.0-scala_2.12-java17-minicluster
tag: 1.20.1-scala_2.12-java17-minicluster
parallelism:
default: 1 # Default parallelism for Flink jobs
@@ -129,14 +129,6 @@ flink:
state:
backend: rocksdb # Use RocksDB for state backend
incremental: true
savepoints:
dir: "/opt/flink/savepoints" # Directory to store savepoints
pvcName: flink-savepoints-pvc # PVC for savepoints persistence
size: 10Gi # PVC size for savepoints storage
data:
dir: "/opt/flink/data" # Directory to store checkpoints/web-upload/rocksdb
pvcName: flink-data-pvc # PVC for checkpoints/web-upload/rocksdb
size: 10Gi # PVC size for checkpoints/web-upload/rocksdb
ha:
dir: "/opt/flink/ha" # Directory to store ha data
pvcName: flink-ha-pvc # PVC for ha
@@ -149,5 +141,14 @@ flink:
taskManager:
numberOfTaskSlots: 12 # Number of task slots for TaskManager
processMemory: 4096m # Size of task manager process memory
# clusterId: some-id
replicas: 1
storage:
rocksDb:
size: 4Gi
resources:
limits:
cpu: 3
memory: 4Gi
requests:
cpu: 1
memory: 2Gi

View File

@@ -53,7 +53,7 @@ entries:
version: 0.1.10
- apiVersion: v2
appVersion: 0.1.0
created: "2025-04-04T13:50:27.975218534+03:30"
created: "2025-03-04T18:04:35.495842696+03:30"
description: Helm chart for flink kube operator
digest: abc08853c65ba36ff3485f182555522408e150f2508d4cac672d588972ddca3c
name: flink-kube-operator
@@ -63,7 +63,7 @@ entries:
version: 0.1.9
- apiVersion: v2
appVersion: 0.1.0
created: "2025-04-04T13:50:27.974750898+03:30"
created: "2025-03-04T18:04:35.495392608+03:30"
description: Helm chart for flink kube operator
digest: 3986a0a2348db1e17a1524eb0d87eabf6d64050d4007c5b393f723393cc4b675
name: flink-kube-operator
@@ -73,7 +73,7 @@ entries:
version: 0.1.8
- apiVersion: v2
appVersion: 0.1.0
created: "2025-04-04T13:50:27.974306458+03:30"
created: "2025-03-04T18:04:35.494948853+03:30"
description: Helm chart for flink kube operator
digest: 1bbeb92ecd10e36fa7d742a61cced0d842139ada0cfeff6fa1b0cf8718189235
name: flink-kube-operator
@@ -83,7 +83,7 @@ entries:
version: 0.1.7
- apiVersion: v2
appVersion: 0.1.0
created: "2025-04-04T13:50:27.973833587+03:30"
created: "2025-03-04T18:04:35.49450822+03:30"
description: Helm chart for flink kube operator
digest: 4031f4a79e65f6c5e60b6ebf9dd7e2a663b1fb6f893056ad81ca33660f94406e
name: flink-kube-operator
@@ -93,7 +93,7 @@ entries:
version: 0.1.6
- apiVersion: v2
appVersion: 0.1.0
created: "2025-04-04T13:50:27.972800097+03:30"
created: "2025-03-04T18:04:35.494040193+03:30"
description: Helm chart for flink kube operator
digest: 22ed155c8538ca5e7dc26863304eb9f76b09c454edbf709a891d7ccc440f35f6
name: flink-kube-operator
@@ -103,7 +103,7 @@ entries:
version: 0.1.5
- apiVersion: v2
appVersion: 0.1.0
created: "2025-04-04T13:50:27.972374168+03:30"
created: "2025-03-04T18:04:35.493584927+03:30"
description: Helm chart for flink kube operator
digest: b548a64ef89bbcd12d92fefffd1fd37758e8fccda02aecd97c7519a08f10fa4a
name: flink-kube-operator
@@ -113,7 +113,7 @@ entries:
version: 0.1.4
- apiVersion: v2
appVersion: 0.1.0
created: "2025-04-04T13:50:27.971952322+03:30"
created: "2025-03-04T18:04:35.493138547+03:30"
description: Helm chart for flink kube operator
digest: 05a9664f574e2d5d1cca764efb6481ad21b9176663b907973a8ef5264f15a91f
name: flink-kube-operator
@@ -123,7 +123,7 @@ entries:
version: 0.1.3
- apiVersion: v2
appVersion: 0.1.0
created: "2025-04-04T13:50:27.971461428+03:30"
created: "2025-03-04T18:04:35.492696005+03:30"
description: Helm chart for flink kube operator
digest: 89345b1a9a79aa18b646705aeb8cfdc547629600cb8a00708a3f64d188f296f2
name: flink-kube-operator
@@ -133,7 +133,7 @@ entries:
version: 0.1.2
- apiVersion: v2
appVersion: 0.1.0
created: "2025-04-04T13:50:27.968770748+03:30"
created: "2025-03-04T18:04:35.490170385+03:30"
description: Helm chart for flink kube operator
digest: 1d2af9af6b9889cc2962d627946464766f1b65b05629073b7fffb9a98cd957e2
name: flink-kube-operator
@@ -143,7 +143,7 @@ entries:
version: 0.1.1
- apiVersion: v2
appVersion: 0.1.0
created: "2025-04-04T13:50:27.968266924+03:30"
created: "2025-03-04T18:04:35.489734651+03:30"
description: Helm chart for flink kube operator
digest: 0890d955904e6a3b2155c086a933b27e45266d896fb69eaad0e811dea40414da
name: flink-kube-operator