feat: add manager

This commit is contained in:
2024-11-30 23:36:44 +03:30
parent d8b87ac6ee
commit 6bd197b812
14 changed files with 109 additions and 67 deletions

View File

@@ -1,15 +1 @@
package config
import "time"
// JobDef describes one Flink job entry from the operator's YAML
// configuration.
type JobDef struct {
Key string `yaml:"key"` // unique identifier; used as the persistence key for the job's saved state
Name string `yaml:"name"`
EntryClass string `yaml:"entryClass"` // main class passed to the Flink run-jar request
JarURI string `yaml:"jarURI"` // location the job jar is fetched from
SavepointInterval time.Duration `yaml:"savepointInterval"` // minimum elapsed time before triggering a new savepoint
}
// Config is the root of the operator's YAML configuration.
type Config struct {
Jobs []JobDef `yaml:"jobs"` // all jobs the operator should manage
}

26
internal/crd/repo.go Normal file
View File

@@ -0,0 +1,26 @@
package crd
import (
"flink-kube-operator/internal/crd/v1alpha1"
"maps"
"k8s.io/apimachinery/pkg/types"
)
var jobs = map[types.UID]*v1alpha1.FlinkJob{}
// repsert ("replace or insert") caches job in the package-level jobs
// map, keyed by the object's Kubernetes UID. It is invoked from the
// watch loop for both Added and Modified events.
// NOTE(review): jobs is also read by GetJob/GetAllJobKeys; if those run
// on a different goroutine than the watcher this is an unguarded data
// race — confirm and add a mutex if so.
func (crd Crd) repsert(job *v1alpha1.FlinkJob) {
jobs[job.GetUID()] = job
}
// GetJob returns a deep copy of the cached FlinkJob with the given UID,
// so callers cannot mutate the shared cache entry.
// An unknown UID yields the zero-value FlinkJob; the previous
// unconditional dereference panicked in that case, because a missing
// map key gives a nil *FlinkJob and the generated DeepCopy returns nil
// for a nil receiver.
// NOTE(review): reads the jobs map without synchronization while the
// watch loop may be writing it — confirm and guard if concurrent.
func GetJob(uid types.UID) v1alpha1.FlinkJob {
	if job, ok := jobs[uid]; ok {
		return *job.DeepCopy()
	}
	return v1alpha1.FlinkJob{}
}
// GetAllJobKeys returns the UIDs of every FlinkJob currently held in
// the package-level cache. The result slice is pre-sized to len(jobs)
// to avoid repeated append growth.
// NOTE(review): iterates the jobs map without synchronization while the
// watch loop may be writing it — confirm and guard if concurrent.
func GetAllJobKeys() []types.UID {
	keys := make([]types.UID, 0, len(jobs))
	for uid := range maps.Keys(jobs) {
		keys = append(keys, uid)
	}
	return keys
}

View File

@@ -40,25 +40,13 @@ func (crd Crd) watchFlinkJobs() {
case watch.Modified:
fmt.Printf("FlinkJob updated: %s\n", job.GetName())
// Handle the new FlinkJob
handleNewFlinkJob(job)
crd.repsert(job)
case watch.Added:
fmt.Printf("New FlinkJob created: %s\n", job.GetName())
// Handle the new FlinkJob
handleNewFlinkJob(job)
crd.repsert(job)
case watch.Deleted:
}
}
}
// handleNewFlinkJob reports the arrival of a FlinkJob resource: a short
// human-readable line on stdout plus a structured debug entry carrying
// the full object and its spec name.
func handleNewFlinkJob(job *v1alpha1.FlinkJob) {
	fmt.Printf("Processing FlinkJob %s in namespace %s kind: %s \n", job.GetName(), job.GetNamespace(), job.Kind)
	lc.Logger.Debug("[crd] [watch]", zap.Any("spec", job), zap.Any("name", job.Spec.Name))
}

View File

@@ -29,7 +29,7 @@ func NewJarFile(URI string) (*JarFile, error) {
return jarFile, nil
}
func (JarFile JarFile) Upload(flinkClient *api.Client) (fileName string, err error) {
func (JarFile *JarFile) Upload(flinkClient *api.Client) (fileName string, err error) {
resp, err := flinkClient.UploadJar(JarFile.filePath)
if err != nil {
@@ -65,6 +65,6 @@ func (jarFile *JarFile) Download() error {
return nil
}
func (jarFile JarFile) Delete() error {
func (jarFile *JarFile) Delete() error {
return os.Remove(jarFile.filePath)
}

View File

@@ -28,7 +28,7 @@ func (job *ManagedJob) startCycle() {
}
func (job *ManagedJob) cycle() {
lc.Logger.Debug("[managed-job] [new] check cycle", zap.String("jobKey", job.def.Key))
lc.Logger.Debug("[managed-job] [new] check cycle", zap.String("jobKey", string(job.def.UID)))
// Init job
if job.state == nil {
@@ -58,7 +58,7 @@ func (job *ManagedJob) cycle() {
if errors.Is(err, ErrNoJobId) {
job.state = nil
}
if job.state.LastSavepointDate == nil || time.Now().Add(-job.def.SavepointInterval).After(*job.state.LastSavepointDate) {
if job.state.LastSavepointDate == nil || time.Now().Add(-job.def.Spec.SavepointInterval.Duration).After(*job.state.LastSavepointDate) {
if job.state.SavepointTriggerId == nil {
job.createSavepoint()
} else {

View File

@@ -1,21 +1,21 @@
package managed_job
import (
"flink-kube-operator/internal/config"
"flink-kube-operator/internal/crd/v1alpha1"
api "github.com/logi-camp/go-flink-client"
"github.com/tidwall/buntdb"
)
type ManagedJob struct {
def config.JobDef
def v1alpha1.FlinkJob
client *api.Client
jarId string
db *buntdb.DB
state *jobState
}
func NewManagedJob(client *api.Client, db *buntdb.DB, def config.JobDef) *ManagedJob {
func NewManagedJob(client *api.Client, db *buntdb.DB, def v1alpha1.FlinkJob) *ManagedJob {
job := &ManagedJob{
def: def,
client: client,

View File

@@ -10,7 +10,7 @@ import (
// upload jar file and set the jarId for later usages
func (job *ManagedJob) upload() error {
jarFile, err := jar.NewJarFile(job.def.JarURI)
jarFile, err := jar.NewJarFile(job.def.Spec.JarURI)
if err != nil {
lc.Logger.Debug("[main] error on download jar", zap.Error(err))
return err
@@ -32,7 +32,7 @@ func (job *ManagedJob) run() error {
runJarResp, err := job.client.RunJar(api.RunOpts{
JarID: job.jarId,
AllowNonRestoredState: true,
EntryClass: job.def.EntryClass,
EntryClass: job.def.Spec.EntryClass,
})
if err != nil {
lc.Logger.Error("[managed-job] [run]", zap.Error(err))

View File

@@ -13,7 +13,7 @@ import (
func (job *ManagedJob) loadState() {
err := job.db.View(
func(tx *buntdb.Tx) error {
if val, err := tx.Get(job.def.Key); err != nil {
if val, err := tx.Get(string(job.def.GetUID())); err != nil {
return err
} else {
return json.Unmarshal([]byte(val), job.state)
@@ -30,15 +30,15 @@ func (job *ManagedJob) updateState(state jobState) {
value, _ := json.Marshal(job.state)
job.db.Update(func(tx *buntdb.Tx) error {
_, _, err := tx.Set(job.def.Key, string(value), nil)
if err != nil {
return err
}
return tx.Commit()
_, _, err := tx.Set(string(job.def.GetUID()), string(value), nil)
return err
})
}
func (job *ManagedJob) setError(errMsg string) {
if job.state == nil {
job.state = &jobState{}
}
job.state.Error = &errMsg
job.state.Status = JobStatusError
job.updateState(*job.state)

View File

@@ -0,0 +1,42 @@
package manager
import (
"flink-kube-operator/internal/crd"
"flink-kube-operator/internal/managed_job"
"time"
api "github.com/logi-camp/go-flink-client"
"github.com/tidwall/buntdb"
"k8s.io/apimachinery/pkg/types"
)
var managedJobs = map[types.UID]managed_job.ManagedJob{}
// Setup starts a background goroutine that reconciles managedJobs
// against the CRD cache every 5 seconds. It returns a stop function
// that terminates the goroutine and releases the ticker.
// Previously the quit channel was created but never closed, so the
// select's quit case was unreachable and the goroutine could never be
// stopped; returning close-over-quit makes shutdown possible while
// remaining backward compatible (callers may discard the result).
func Setup(client *api.Client, db *buntdb.DB) func() {
	ticker := time.NewTicker(5 * time.Second)
	quit := make(chan struct{})
	go func() {
		// Stop the ticker when the loop exits so its resources are freed.
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				cycle(client, db)
			case <-quit:
				return
			}
		}
	}()
	return func() { close(quit) }
}
// cycle ensures every FlinkJob known to the CRD cache has a
// corresponding ManagedJob, creating one on first sight.
// The previous version discarded NewManagedJob's result and never wrote
// to managedJobs, so the existence check always failed and a fresh
// ManagedJob was constructed on every tick.
// NOTE(review): managedJobs stores a value copy of the ManagedJob while
// NewManagedJob returns a pointer; the copy is only used as an
// existence marker here — consider making the map hold pointers.
// NOTE(review): crd.GetAllJobKeys/GetJob read the CRD cache from this
// goroutine while the watcher writes it — confirm synchronization.
func cycle(client *api.Client, db *buntdb.DB) {
	for _, uid := range crd.GetAllJobKeys() {
		if _, ok := managedJobs[uid]; ok {
			continue
		}
		job := crd.GetJob(uid)
		managedJobs[uid] = *managed_job.NewManagedJob(client, db, job)
	}
}