fix: prevent concurrent jobs map writes by adding synchronization
This fix prevents fatal runtime crashes ("fatal error: concurrent map writes") when the jobs map is accessed from multiple goroutines; Go's runtime deliberately aborts the process on unsynchronized map writes.
This commit is contained in:
@@ -2,25 +2,28 @@ package crd
|
||||
|
||||
import (
|
||||
"flink-kube-operator/internal/crd/v1alpha1"
|
||||
"maps"
|
||||
"flink-kube-operator/pkg"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
var jobs = map[types.UID]*v1alpha1.FlinkJob{}
|
||||
// var jobs = map[types.UID]*v1alpha1.FlinkJob{}
|
||||
var jobs = pkg.SafeMap[types.UID, *v1alpha1.FlinkJob]{}
|
||||
|
||||
func (crd *Crd) repsert(job *v1alpha1.FlinkJob) {
|
||||
jobs[job.GetUID()] = job
|
||||
jobs.Store(job.GetUID(), job)
|
||||
}
|
||||
|
||||
func GetJob(uid types.UID) v1alpha1.FlinkJob {
|
||||
return *jobs[uid].DeepCopy()
|
||||
job, _ := jobs.Load(uid)
|
||||
return *job.DeepCopy()
|
||||
}
|
||||
|
||||
func GetAllJobKeys() []types.UID {
|
||||
keys := []types.UID{}
|
||||
for k := range maps.Keys(jobs) {
|
||||
jobs.Range(func(k types.UID, value *v1alpha1.FlinkJob) bool {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
return true
|
||||
})
|
||||
return keys
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user