TODO: Redesign the load-balancing scheme

parent f7dbbb35cb
commit 644e65603f
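In rough terms, this commit stops sending PacketsRequest RPCs from inside the useFsmStateNotLock read callback. Instead it collects each live peer's spare capacity into a list of PeerIdCap, applies the updated WorkerState through the raft node, and only dispatches packets to the peers from the closure that runs once that apply has succeeded. The sketch below illustrates that two-phase shape only; Peer, PeerCap, RaftApply and Dispatcher are simplified stand-ins invented for illustration, not the project's real classes.

// Simplified sketch of the rebalancing flow this commit moves toward.
// Illustrative stand-in types only; the real code uses jraft Task/Node,
// PacketsRequest, PeerIdCap and the project's RPC client.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class RebalanceSketch {

    record Peer(String endpoint) {}

    // Mirrors the new PeerIdCap: a peer plus the capacity it can still accept.
    record PeerCap(Peer peer, long cap) {}

    interface RaftApply {
        // Replicates the updated queue sizes; onCommitted runs only after the apply succeeds.
        void apply(Map<Peer, Long> queueSizes, Runnable onCommitted);
    }

    interface Dispatcher {
        // Asynchronously sends `count` packets to a peer (stand-in for invokeAsync).
        void send(Peer peer, int count);
    }

    static void rebalance(Map<Peer, Long> queueSizes, RaftApply raft, Dispatcher dispatcher) {
        // Phase 1: while reading the FSM state, record who has spare capacity (100 - queued).
        List<PeerCap> caps = new ArrayList<>();
        for (Map.Entry<Peer, Long> e : queueSizes.entrySet()) {
            long cap = 100 - e.getValue();
            if (cap <= 0) {
                continue;
            }
            caps.add(new PeerCap(e.getKey(), cap));
            e.setValue(100L); // mark the queue as full, as the diff does with setTaskQueueSize(100)
        }

        // Phase 2: apply the new state through raft first; dispatch only once it is committed.
        raft.apply(queueSizes, () ->
                caps.forEach(pc -> dispatcher.send(pc.peer(), (int) pc.cap())));
    }
}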
@@ -97,7 +97,6 @@ public class PacketsProcessor implements RpcProcessor<PacketsProcessor.PacketsRe
                 log.debug("finsh tasks size {}, size: {}", status, request.packets.size());
                 rpcCtx.sendResponse(resp);
             }
-
         });
     });
 
@@ -205,6 +205,7 @@ public class StateServerFactory {
     }
 
     public void applyWorkerState(WorkerState state, SyncClosure<State> closure) {
+        log.debug("applyWorkerState");
         if (!ss.isLeader()) {
             ss.handlerNotLeaderError(closure);
             return;
@@ -213,24 +214,21 @@ public class StateServerFactory {
         StateServerFactory.getStateServer().useFsmStateNotLock((fsmState)->{
             var wmap = fsmState.getWorkers();
             var wstate = wmap.get(state.getPeerId());
-
             if(wstate == null) {
                 wmap.put(state.getPeerId(), state);
-                try {
-                    final Task task = new Task();
-                    closure.setValue(fsmState);
-                    task.setData(ByteBuffer.wrap(SerializerManager.getSerializer(SerializerManager.Hessian2).serialize(fsmState)));
-                    task.setDone(closure);
-                    StateServerFactory.getStateServer().getNode().apply(task); // submit the data
-                } catch (CodecException e) {
-                    String errorMsg = "Fail to encode TaskState";
-                    log.debug("{}:{}",errorMsg, e);
-                    closure.failure(errorMsg, PeerId.emptyPeer());
-                    closure.run(new Status(RaftError.EINTERNAL, errorMsg));
-                }
-            } else {
-                closure.success(fsmState);
-                closure.run(Status.OK());
             }
+
+            try {
+                final Task task = new Task();
+                closure.setValue(fsmState);
+                task.setData(ByteBuffer.wrap(SerializerManager.getSerializer(SerializerManager.Hessian2).serialize(fsmState)));
+                task.setDone(closure);
+                StateServerFactory.getStateServer().getNode().apply(task); // submit the data
+            } catch (CodecException e) {
+                String errorMsg = "Fail to encode TaskState";
+                log.debug("{}:{}",errorMsg, e);
+                closure.failure(errorMsg, PeerId.emptyPeer());
+                closure.run(new Status(RaftError.EINTERNAL, errorMsg));
+            }
         });
 
@@ -56,7 +56,7 @@ public class SyncWorkerStateProcessor implements RpcProcessor<SyncWorkerStatePro
     @Override
    public void handleRequest(RpcContext rpcCtx, RequestWorkerState request) {
 
-        log.info("request: {}", request);
+        log.info("RequestWorkerState: {}", request);
         final SyncClosure<State> closure = new SyncClosure< State>() {
             @Override
             public void run(Status status) {
@@ -7,11 +7,13 @@
 package com.yuandian.dataflow.statemachine.state;
 
 import java.time.Instant;
+import java.util.ArrayList;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
 import com.alibaba.nacos.shaded.io.grpc.netty.shaded.io.netty.handler.codec.http.HttpContentEncoder.Result;
 import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.entity.PeerId;
 import com.alipay.sofa.jraft.error.RemotingException;
 import com.alipay.sofa.jraft.rpc.InvokeCallback;
 import com.google.protobuf.Any;
@@ -38,6 +40,18 @@ import lombok.extern.slf4j.Slf4j;
 @ToString
 public class StateFactory {
 
+    @Getter
+    @Setter
+    @ToString
+    public static class PeerIdCap {
+        private PeerId peer;
+        private long cap;
+        public PeerIdCap(PeerId pid, long cap) {
+            this.peer = pid;
+            this.cap = cap;
+        }
+    }
+
     public static Thread masterExecute = new Thread(new Runnable() {
         @Override
         public void run() {
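The PeerIdCap holder added above relies on Lombok: the @Getter and @Setter annotations generate the accessors that the later hunks call (getCap(), setCap()). A hand-written equivalent, shown only for orientation (PeerId is the jraft class imported above), would look roughly like this:

// Hand-written equivalent of the accessors Lombok generates for PeerIdCap.
public static class PeerIdCap {
    private PeerId peer;
    private long cap;

    public PeerIdCap(PeerId pid, long cap) {
        this.peer = pid;
        this.cap = cap;
    }

    public PeerId getPeer() { return peer; }
    public void setPeer(PeerId peer) { this.peer = peer; }

    public long getCap() { return cap; }
    public void setCap(long cap) { this.cap = cap; }
}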
@@ -50,46 +64,24 @@ public class StateFactory {
                 var ss = StateServerFactory.getStateServer();
                 // var state = ss.getFsm().getState();
 
+                ArrayList<PeerIdCap> pclist = new ArrayList<PeerIdCap>();
 
                 ss.useFsmStateNotLock((state) -> {
                     alivePeers.forEach((peer) -> {
                         WorkerState ws = state.getWorkers().get(peer);
 
                         if (ws != null) {
                             var cap = 100 - ws.getTaskQueueSize();
                             log.debug("cap :{} peer: {}", cap, peer);
                             if (cap <= 0) {
                                 return ;
                             }
 
-                            var request = new PacketsRequest();
-                            for (int i = 0; i < cap; i++) {
-                                var p = Any.pack(
-                                    BacktrackingFlowOuterClass.BacktrackingFlow.newBuilder()
-                                    .setTableId(10086)
-                                    .build());
-                                request.getPackets().add(p);
-                            }
-
-
-                            try {
-                                log.debug("rpc {}", peer);
-                                ss.getRpcClient().invokeAsync(peer.getEndpoint(),
-                                    request, new InvokeCallback() {
-                                        @Override
-                                        public void complete(Object result, Throwable err) {
-                                            if (err != null) {
-                                                err.printStackTrace();
-                                            } else {
-                                                log.info("{} peer result", result);
-                                            }
-                                        }
-
-                                    }, 5000);
-                            } catch (InterruptedException | RemotingException e) {
-                                e.printStackTrace();
-                            }
                             ws.setUpdateAt(Instant.now());
                             ws.setTaskQueueSize(100);
+                            var pc = new PeerIdCap(peer, cap);
+                            pc.setCap(cap);
+                            ;
+                            pclist.add(pc);
                         }
                     });
@@ -97,6 +89,48 @@ public class StateFactory {
                     @Override
                     public void run(Status status) {
                         log.info("task queue updated successfully {}", this.getValue().getWorkers());
 
+                        pclist.forEach((peer) -> {
+
+                            if (peer.getCap() <= 0) {
+                                return ;
+                            }
+
+                            var request = new PacketsRequest();
+                            for (int i = 0; i < peer.getCap(); i++) {
+                                var p = Any.pack(
+                                    BacktrackingFlowOuterClass.BacktrackingFlow.newBuilder()
+                                    .setTableId(10086)
+                                    .build());
+                                request.getPackets().add(p);
+                            }
+
+                            try {
+                                log.debug("rpc {}", peer);
+                                ss.getRpcClient().invokeAsync(peer.peer.getEndpoint(),
+                                    request, new InvokeCallback() {
+                                        @Override
+                                        public void complete(Object result, Throwable err) {
+                                            if (err != null) {
+                                                err.printStackTrace();
+                                            } else {
+                                                log.info("{} peer result", result);
+                                            }
+                                        }
+
+                                    }, 5000);
+                            } catch (InterruptedException | RemotingException e) {
+                                e.printStackTrace();
+                            }
+
+                        });
                     }
                 });
@@ -113,7 +147,7 @@ public class StateFactory {
 
                 }
 
-                Thread.sleep(2000);
+                Thread.sleep(5000);
             }
         } catch (InterruptedException e) {
             e.printStackTrace();