fix: bug in thread pool size calculation in ProcessorTracker

This commit is contained in:
tjq 2021-01-16 22:52:26 +08:00
commit dd66c8fd31
2 changed files with 5 additions and 2 deletions

View File

@ -340,13 +340,16 @@ public class ProcessorTracker {
ExecuteType executeType = ExecuteType.valueOf(instanceInfo.getExecuteType()); ExecuteType executeType = ExecuteType.valueOf(instanceInfo.getExecuteType());
ProcessorType processorType = ProcessorType.valueOf(instanceInfo.getProcessorType()); ProcessorType processorType = ProcessorType.valueOf(instanceInfo.getProcessorType());
if (executeType == ExecuteType.MAP_REDUCE) { if (executeType == ExecuteType.MAP_REDUCE || executeType == ExecuteType.MAP) {
return instanceInfo.getThreadConcurrency(); return instanceInfo.getThreadConcurrency();
} }
// 脚本类自带线程池不过为了少一点逻辑判断还是象征性分配一个线程 // 脚本类自带线程池不过为了少一点逻辑判断还是象征性分配一个线程
if (processorType == ProcessorType.PYTHON || processorType == ProcessorType.SHELL) { if (processorType == ProcessorType.PYTHON || processorType == ProcessorType.SHELL) {
return 1; return 1;
} }
if (TimeExpressionType.frequentTypes.contains(instanceInfo.getTimeExpressionType())) {
return instanceInfo.getThreadConcurrency();
}
return 2; return 2;
} }

View File

@ -173,7 +173,7 @@ public class FrequentTaskTracker extends TaskTracker {
// 判断是否超出最大执行实例数 // 判断是否超出最大执行实例数
if (maxInstanceNum > 0) { if (maxInstanceNum > 0) {
if (timeExpressionType == TimeExpressionType.FIXED_RATE) { if (timeExpressionType == TimeExpressionType.FIXED_RATE) {
if (subInstanceId2TimeHolder.size() > maxInstanceNum) { if (subInstanceId2TimeHolder.size() >= maxInstanceNum) {
log.warn("[FQTaskTracker-{}] cancel to launch the subInstance({}) due to too much subInstance is running.", instanceId, subInstanceId); log.warn("[FQTaskTracker-{}] cancel to launch the subInstance({}) due to too much subInstance is running.", instanceId, subInstanceId);
processFinishedSubInstance(subInstanceId, false, "TOO_MUCH_INSTANCE"); processFinishedSubInstance(subInstanceId, false, "TOO_MUCH_INSTANCE");
return; return;