package fusenrender

import (
	"context"
	"io"
	"log"
	"sync"
	"time"

	"github.com/474420502/execute/triggered"
	"github.com/474420502/passer"
	sm "github.com/lni/dragonboat/v4/statemachine"
)

// FsPasser registers, per command type, the handler that is executed after a
// command struct has been passed in asynchronously (see Update below).
var FsPasser *passer.Passer[sm.Result] = func() *passer.Passer[sm.Result] {
	fsPasser := passer.NewPasser[sm.Result]()

	// Enqueue: push the item onto its group's priority queue, creating the
	// queue on first use, then wake up the dequeue consumer.
	fsPasser.RegisterPasser(&CmdEnqueue{}, func(cxt context.Context, obj any) (sm.Result, error) {

		var smqueue = cxt.Value(ctxSMQueue{}).(*SMQueue)
		var consumer = cxt.Value(ctxDequeueHandler{}).(*triggered.EventExecute[bool])

		cmd := obj.(*CmdEnqueue)
		key := cmd.Item.GetKey()

		smqueue.mu.Lock()
		var queue *PriorityQueue[QueueItem]
		var ok bool
		if queue, ok = smqueue.queues[cmd.Group]; !ok {
			queue = NewPriorityQueue[QueueItem]()
			smqueue.queues[cmd.Group] = queue
		}
		smqueue.mu.Unlock()

		queue.Push(&Slice[QueueItem]{
			Key:   key,
			Value: cmd.Item,
		})

		var result sm.Result
		consumer.Notify(consumer.NULL) // notify the consumer that an update can now run
		return result, nil
	})

	// Dequeue: pop the highest-priority item of the group (if any), encode it
	// into the entry result, and report the remaining queue size.
	fsPasser.RegisterPasser(&CmdDequeue{}, func(cxt context.Context, obj any) (sm.Result, error) {

		var smqueue = cxt.Value(ctxSMQueue{}).(*SMQueue)
		var e = cxt.Value(ctxEntry{}).(*sm.Entry)

		var queue *PriorityQueue[QueueItem]
		var ok bool
		cmd := obj.(*CmdDequeue)
		smqueue.mu.Lock()
		if queue, ok = smqueue.queues[cmd.Group]; !ok {
			queue = NewPriorityQueue[QueueItem]()
			smqueue.queues[cmd.Group] = queue
		}
		smqueue.mu.Unlock()

		var result sm.Result
		if queue.Empty() {
			return result, nil
		}

		item := queue.Pop()
		if item == nil {
			result.Value = 0
			return result, nil
		}

		d, err := item.Encode()
		if err != nil {
			return result, err
		}
		e.Result.Data = d
		result.Data = d

		size := queue.Size()
		smqueue.counter.Notify(size)
		// log.Println("queue remain:", queue.Size())

		return result, nil
	})

	return fsPasser
}()
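
// Note: CmdEnqueue, CmdDequeue, QueueItem, Slice, PriorityQueue and
// DequeueHandler are defined elsewhere in this package.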

// SMQueue implements the dragonboat StateMachine interface and manages
// PriorityQueue-backed message queues, one per group.
type SMQueue struct {
	// ID of the shard this state machine belongs to
	shardID uint64
	// Replica ID
	replicaID uint64

	// mutex protecting concurrent access to the queues map
	mu *sync.Mutex
	// mapping from group name to its queue
	queues map[string]*PriorityQueue[QueueItem]

	// counter logs (and throttles logging of) the remaining queue size
	counter *triggered.EventExecute[int64]
}

// NewSMQueue creates and returns a new SMQueue state machine.
func NewSMQueue(shardID uint64, replicaID uint64) sm.IStateMachine {
	mu := &sync.Mutex{}
	return &SMQueue{
		shardID:   shardID,
		replicaID: replicaID,

		mu:     mu,
		queues: make(map[string]*PriorityQueue[QueueItem]),

		// Log the remaining queue size, then back off: 5s while items remain,
		// 15s once the queue is empty.
		counter: triggered.RegisterExecute[int64](func(params *triggered.Params[int64]) {
			if params.Value != 0 {
				log.Printf("queue remain: %d\n", params.Value)
				time.Sleep(time.Second * 5)
			} else {
				time.Sleep(time.Second * 15)
			}
		}),
	}
}
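
// A minimal usage sketch (not part of this file): assuming a configured
// dragonboat NodeHost nh, a config.Config cfg and an initialMembers map exist
// elsewhere, the state machine could be started with:
//
//	err := nh.StartReplica(initialMembers, false, NewSMQueue, cfg)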

// Lookup performs a local lookup on the SMQueue instance. Read queries are not
// supported yet, so a nil item is always returned.
func (s *SMQueue) Lookup(group interface{}) (item interface{}, err error) {
	return item, nil
}
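
// Context keys used to hand per-call values (the raft entry, the owning
// SMQueue, and the dequeue trigger) to the handlers registered on FsPasser.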
type ctxEntry struct{}
type ctxSMQueue struct{}
type ctxDequeueHandler struct{}

// Update applies a committed raft entry: it decodes the command carried in
// e.Cmd and dispatches it to the handler registered on FsPasser.
func (s *SMQueue) Update(e sm.Entry) (result sm.Result, err error) {
	ctx := context.TODO()

	ctx = context.WithValue(ctx, ctxEntry{}, &e)
	ctx = context.WithValue(ctx, ctxSMQueue{}, s)
	ctx = context.WithValue(ctx, ctxDequeueHandler{}, DequeueHandler)
	return FsPasser.ExecuteWithBytes(ctx, e.Cmd)
}
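
// A minimal sketch of how a command might reach Update through raft; nh, the
// shard ID and cmdBytes are assumptions, and cmdBytes must use the encoding
// that FsPasser.ExecuteWithBytes expects:
//
//	sess := nh.GetNoOPSession(shardID)
//	result, err := nh.SyncPropose(ctx, sess, cmdBytes)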

// SaveSnapshot saves the current IStateMachine state into a snapshot using the
// specified io.Writer object. Snapshotting of the queues is currently
// disabled; the gob-based implementation below is kept for reference.
func (s *SMQueue) SaveSnapshot(w io.Writer,
	fc sm.ISnapshotFileCollection, done <-chan struct{}) error {
	// There are no external files in this state machine, so fc is left
	// untouched.

	// s.mu.Lock()
	// defer s.mu.Unlock()

	// return gob.NewEncoder(w).Encode(&s.queues)
	return nil
}

// RecoverFromSnapshot recovers the state using the provided snapshot. Like
// SaveSnapshot, the gob-based implementation is currently disabled.
func (s *SMQueue) RecoverFromSnapshot(r io.Reader,
	files []sm.SnapshotFile,
	done <-chan struct{}) error {
	// The input files slice is expected to be empty.

	// err := gob.NewDecoder(r).Decode(&s.queues)
	// if err != nil {
	// 	return err
	// }

	return nil
}

// Close closes the IStateMachine instance. There is nothing for us to clean up
// or release as this is a purely in-memory data store. Note that the Close
// method is not guaranteed to be called, as the node can crash at any time.
func (s *SMQueue) Close() error { return nil }