#603 Other files formatted

This commit is contained in:
nkorange 2019-01-10 18:53:18 +08:00
parent 7307108ca7
commit 791b7c3739
30 changed files with 298 additions and 369 deletions

View File

@ -42,8 +42,8 @@ public class RunningConfig implements ApplicationListener<WebServerInitializedEv
@Override
public void onApplicationEvent(WebServerInitializedEvent event) {
Loggers.SRV_LOG.info("[SERVER-INIT] got port:" + event.getWebServer().getPort());
Loggers.SRV_LOG.info("[SERVER-INIT] got path:" + servletContext.getContextPath());
Loggers.SRV_LOG.info("[SERVER-INIT] got port: {}", event.getWebServer().getPort());
Loggers.SRV_LOG.info("[SERVER-INIT] got path: {}", servletContext.getContextPath());
serverPort = event.getWebServer().getPort();
contextPath = servletContext.getContextPath();
@ -51,7 +51,7 @@ public class RunningConfig implements ApplicationListener<WebServerInitializedEv
try {
RaftCore.init();
} catch (Exception e) {
Loggers.RAFT.error("VIPSRV-RAFT", "failed to initialize raft sub system", e);
Loggers.RAFT.error("[NACOS-RAFT] {} {}", "failed to initialize raft sub system", e);
}
}

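The pattern applied throughout this commit is SLF4J parameterized logging: {} placeholders replace string concatenation, and a throwable passed as the final argument, with no placeholder of its own, still gets its stack trace printed. A minimal self-contained sketch of both idioms, with an illustrative class name and an assumed SLF4J binding on the classpath:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingStyleDemo {
    private static final Logger LOG = LoggerFactory.getLogger(LoggingStyleDemo.class);

    public static void main(String[] args) {
        int port = 8848;
        // Concatenation builds the message string even when INFO is disabled:
        LOG.info("[SERVER-INIT] got port:" + port);
        // Placeholders defer formatting until the level check passes:
        LOG.info("[SERVER-INIT] got port: {}", port);

        // Pass the throwable last, with no matching placeholder, so the
        // full stack trace is logged:
        try {
            throw new IllegalStateException("raft init failed");
        } catch (Exception e) {
            LOG.error("[NACOS-RAFT] failed to initialize raft sub system", e);
        }
    }
}
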
View File

@ -65,11 +65,9 @@ public class ClusterController {
Cluster cluster = domain.getClusterMap().get(clusterName);
if (cluster == null) {
Loggers.SRV_LOG.warn("UPDATE-CLUSTER", "cluster not exist, will create it: " + clusterName + ", service:" + serviceName);
Loggers.SRV_LOG.warn("[UPDATE-CLUSTER] cluster not exist, will create it: {}, service: {}", clusterName, serviceName);
cluster = new Cluster();
cluster.setName(clusterName);
// throw new NacosException(NacosException.INVALID_PARAM, "cluster not found:"+ clusterName + ", " + serviceName);
}
cluster.setDefCkport(NumberUtils.toInt(checkPort));

View File

@ -187,24 +187,22 @@ public class Cluster extends com.alibaba.nacos.api.naming.pojo.Cluster implement
} else {
if (ip.isValid() != oldIP.isValid()) {
// ip validation status updated
Loggers.EVT_LOG.info("{" + getDom().getName() + "} {SYNC} " +
"{IP-" + (ip.isValid() ? "ENABLED" : "DISABLED") + "} " + ip.getIp()
+ ":" + ip.getPort() + "@" + getName());
Loggers.EVT_LOG.info("{} {SYNC} IP-{} {}:{}@{}",
getDom().getName(), (ip.isValid() ? "ENABLED" : "DISABLED"), ip.getIp(), ip.getPort(), getName());
}
}
if (ip.getWeight() != oldIP.getWeight()) {
// ip validation status updated
Loggers.EVT_LOG.info("{" + getDom().getName() + "} {SYNC} " +
"{IP-UPDATED} " + oldIP.toString() + "->" + ip.toString());
Loggers.EVT_LOG.info("{} {SYNC} {IP-UPDATED} {}->{}", getDom().getName(), oldIP.toString(), ip.toString());
}
}
}
List<IpAddress> newIPs = subtract(ips, oldIPMap.values());
if (newIPs.size() > 0) {
Loggers.EVT_LOG.info("{" + getDom().getName() + "} {SYNC} {IP-NEW} cluster: " + getName()
+ ", new ips(" + newIPs.size() + "): " + newIPs.toString());
Loggers.EVT_LOG.info("{} {SYNC} {IP-NEW} cluster: {}, new ips size: {}, content: {}",
getDom().getName(), getName(), newIPs.size(), newIPs.toString());
for (IpAddress ip : newIPs) {
HealthCheckStatus.reset(ip);
@ -214,8 +212,8 @@ public class Cluster extends com.alibaba.nacos.api.naming.pojo.Cluster implement
List<IpAddress> deadIPs = subtract(oldIPMap.values(), ips);
if (deadIPs.size() > 0) {
Loggers.EVT_LOG.info("{" + getDom().getName() + "} {SYNC} {IP-DEAD} cluster: " + getName()
+ ", dead ips(" + deadIPs.size() + "): " + deadIPs.toString());
Loggers.EVT_LOG.info("{} {SYNC} {IP-DEAD} cluster: {}, dead ips size: {}, content: {}",
getDom().getName(), getName(), deadIPs.size(), deadIPs.toString());
for (IpAddress ip : deadIPs) {
HealthCheckStatus.remv(ip);
@ -333,32 +331,38 @@ public class Cluster extends com.alibaba.nacos.api.naming.pojo.Cluster implement
public void update(Cluster cluster) {
if (!healthChecker.equals(cluster.getHealthChecker())) {
Loggers.SRV_LOG.info("[CLUSTER-UPDATE] " + cluster.getDom().getName() + ":" + cluster.getName() + ", healthChecker: " + healthChecker.toString() + " -> " + cluster.getHealthChecker().toString());
Loggers.SRV_LOG.info("[CLUSTER-UPDATE] {}:{}:, healthChecker: {} -> {}",
cluster.getDom().getName(), cluster.getName(), healthChecker.toString(), cluster.getHealthChecker().toString());
healthChecker = cluster.getHealthChecker();
}
if (defCkport != cluster.getDefCkport()) {
Loggers.SRV_LOG.info("[CLUSTER-UPDATE] " + cluster.getDom().getName() + ":" + cluster.getName() + ", defCkport: " + defCkport + " -> " + cluster.getDefCkport());
Loggers.SRV_LOG.info("[CLUSTER-UPDATE] {}:{}, defCkport: {} -> {}",
cluster.getDom().getName(), cluster.getName(), defCkport, cluster.getDefCkport());
defCkport = cluster.getDefCkport();
}
if (defIPPort != cluster.getDefIPPort()) {
Loggers.SRV_LOG.info("[CLUSTER-UPDATE] " + cluster.getDom().getName() + ":" + cluster.getName() + ", defIPPort: " + defIPPort + " -> " + cluster.getDefIPPort());
Loggers.SRV_LOG.info("[CLUSTER-UPDATE] {}:{}, defIPPort: {} -> {}",
cluster.getDom().getName(), cluster.getName(), defIPPort, cluster.getDefIPPort());
defIPPort = cluster.getDefIPPort();
}
if (!StringUtils.equals(submask, cluster.getSubmask())) {
Loggers.SRV_LOG.info("[CLUSTER-UPDATE] " + cluster.getDom().getName() + ":" + cluster.getName() + ", submask: " + submask + " -> " + cluster.getSubmask());
Loggers.SRV_LOG.info("[CLUSTER-UPDATE] {}:{}, submask: {} -> {}",
cluster.getDom().getName(), cluster.getName(), submask, cluster.getSubmask());
submask = cluster.getSubmask();
}
if (!StringUtils.equals(sitegroup, cluster.getSitegroup())) {
Loggers.SRV_LOG.info("[CLUSTER-UPDATE] " + cluster.getDom().getName() + ":" + cluster.getName() + ", sitegroup: " + sitegroup + " -> " + cluster.getSitegroup());
Loggers.SRV_LOG.info("[CLUSTER-UPDATE] {}:{}, sitegroup: {} -> {}",
cluster.getDom().getName(), cluster.getName(), sitegroup, cluster.getSitegroup());
sitegroup = cluster.getSitegroup();
}
if (isUseIPPort4Check() != cluster.isUseIPPort4Check()) {
Loggers.SRV_LOG.info("[CLUSTER-UPDATE] " + cluster.getDom().getName() + ":" + cluster.getName() + ", useIPPort4Check: " + isUseIPPort4Check() + " -> " + cluster.isUseIPPort4Check());
Loggers.SRV_LOG.info("[CLUSTER-UPDATE] {}:{}, useIPPort4Check: {} -> {}",
cluster.getDom().getName(), cluster.getName(), isUseIPPort4Check(), cluster.isUseIPPort4Check());
setUseIPPort4Check(cluster.isUseIPPort4Check());
}
@ -387,9 +391,9 @@ public class Cluster extends com.alibaba.nacos.api.naming.pojo.Cluster implement
public boolean responsible(IpAddress ip) {
return Switch.isHealthCheckEnabled(dom.getName())
&& !getHealthCheckTask().isCancelled()
&& DistroMapper.responsible(getDom().getName())
&& ipContains.containsKey(ip.toIPAddr());
&& !getHealthCheckTask().isCancelled()
&& DistroMapper.responsible(getDom().getName())
&& ipContains.containsKey(ip.toIPAddr());
}
public void valid() {

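Note that SLF4J treats only the exact empty pair {} as a formatting anchor, so the event tags like {SYNC} and {IP-UPDATED} in the messages above survive the conversion literally. A short sketch demonstrating this, with an illustrative logger name:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PlaceholderLiteralDemo {
    private static final Logger EVT_LOG = LoggerFactory.getLogger("naming-event");

    public static void main(String[] args) {
        // Only "{}" is substituted; "{SYNC}" and "{IP-UPDATED}" contain text
        // between the braces and are printed verbatim.
        EVT_LOG.info("{} {SYNC} {IP-UPDATED} {}->{}",
            "nacos.test.1", "1.1.1.1:80_1.0", "1.1.1.1:80_2.0");
        // Output: nacos.test.1 {SYNC} {IP-UPDATED} 1.1.1.1:80_1.0->1.1.1.1:80_2.0
    }
}
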
View File

@ -21,8 +21,6 @@ import com.alibaba.nacos.naming.misc.*;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.collections.CollectionUtils;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
@ -61,7 +59,7 @@ public class DistroMapper {
init();
UtilsAndCommons.SERVER_STATUS_EXECUTOR.schedule(new ServerStatusReporter(),
60000, TimeUnit.MILLISECONDS);
60000, TimeUnit.MILLISECONDS);
}
/**
@ -110,7 +108,7 @@ public class DistroMapper {
// site:ip:lastReportTime:weight
String[] params = config.split("#");
if (params.length <= 3) {
Loggers.SRV_LOG.warn("received malformed distro map data: " + config);
Loggers.SRV_LOG.warn("received malformed distro map data: {}", config);
continue;
}
@ -159,10 +157,10 @@ public class DistroMapper {
float curRatio = (float) newHealthyList.size() / allSiteSrvs.size();
if (AUTO_DISABLED_HEALTH_CHECK
&& curRatio > Switch.getDistroThreshold()
&& System.currentTimeMillis() - LAST_HEALTH_SERVER_MILLIS > STABLE_PERIOD) {
&& curRatio > Switch.getDistroThreshold()
&& System.currentTimeMillis() - LAST_HEALTH_SERVER_MILLIS > STABLE_PERIOD) {
Loggers.SRV_LOG.info("[VIPSRV-DISTRO] distro threshold restored and " +
"stable now, enable health check. current ratio: " + curRatio);
"stable now, enable health check. current ratio: {}", curRatio);
Switch.setHeathCheckEnabled(true);
@ -174,7 +172,8 @@ public class DistroMapper {
// for every change disable healthy check for some while
if (Switch.isHealthCheckEnabled()) {
Loggers.SRV_LOG.info("[VIPSRV-DISTRO] healthy server list changed, " +
"disable health check for " + STABLE_PERIOD + "ms from now on, healthList: " + healthyList + ",newHealthyList " + newHealthyList);
"disable health check for {} ms from now on, healthList: {}, newHealthyList {}",
STABLE_PERIOD, healthyList, newHealthyList);
Switch.setHeathCheckEnabled(false);
AUTO_DISABLED_HEALTH_CHECK = true;
@ -200,7 +199,7 @@ public class DistroMapper {
// site:ip:lastReportTime:weight
String[] params = config.split("#");
if (params.length <= 3) {
Loggers.SRV_LOG.warn("received malformed distro map data: " + config);
Loggers.SRV_LOG.warn("received malformed distro map data: {}", config);
continue;
}
@ -232,8 +231,8 @@ public class DistroMapper {
if (serverId.equals(newServerId)) {
if (s.alive != server.alive || s.weight != server.weight) {
Loggers.SRV_LOG.warn("server beat out of date, current: " + JSON.toJSONString(server)
+ ", last: " + JSON.toJSONString(s));
Loggers.SRV_LOG.warn("server beat out of date, current: {}, last: {}",
JSON.toJSONString(server), JSON.toJSONString(s));
}
tmpServerList.add(server);
continue;
@ -281,10 +280,10 @@ public class DistroMapper {
float curRatio = (float) newHealthyList.size() / allLocalSiteSrvs.size();
if (AUTO_DISABLED_HEALTH_CHECK
&& curRatio > Switch.getDistroThreshold()
&& System.currentTimeMillis() - LAST_HEALTH_SERVER_MILLIS > STABLE_PERIOD) {
&& curRatio > Switch.getDistroThreshold()
&& System.currentTimeMillis() - LAST_HEALTH_SERVER_MILLIS > STABLE_PERIOD) {
Loggers.SRV_LOG.info("[VIPSRV-DISTRO] distro threshold restored and " +
"stable now, enable health check. current ratio: " + curRatio);
"stable now, enable health check. current ratio: {}", curRatio);
Switch.setHeathCheckEnabled(true);
@ -296,7 +295,7 @@ public class DistroMapper {
// for every change disable healthy check for some while
if (Switch.isHealthCheckEnabled()) {
Loggers.SRV_LOG.info("[VIPSRV-DISTRO] healthy server list changed, " +
"disable health check for " + STABLE_PERIOD + "ms from now on");
"disable health check for {} ms from now on", STABLE_PERIOD);
Switch.setHeathCheckEnabled(false);
AUTO_DISABLED_HEALTH_CHECK = true;
@ -402,7 +401,7 @@ public class DistroMapper {
try {
NamingProxy.reqAPI("distroStatus", params, serverIP, false);
} catch (Exception e) {
Loggers.SRV_LOG.warn("DISTRO-STATUS-CLEAN", "Failed to request to clean server status to " + serverIP, e);
Loggers.SRV_LOG.warn("[DISTRO-STATUS-CLEAN] Failed to request to clean server status to " + serverIP, e);
}
}
@ -488,7 +487,7 @@ public class DistroMapper {
}
if (!allServers.contains(localhostIP)) {
Loggers.SRV_LOG.error("NA", "local ip is not in serverlist, ip: " + localhostIP + ", serverlist: " + allServers);
Loggers.SRV_LOG.error("local ip is not in serverlist, ip: {}, serverlist: {}", localhostIP, allServers);
return;
}
@ -500,7 +499,7 @@ public class DistroMapper {
}
}
} catch (Exception e) {
Loggers.SRV_LOG.error("SERVER-STATUS", "Exception while sending server status: ", e);
Loggers.SRV_LOG.error("[SERVER-STATUS] Exception while sending server status", e);
} finally {
UtilsAndCommons.SERVER_STATUS_EXECUTOR.schedule(this, Switch.getServerStatusSynchronizationPeriodMillis(), TimeUnit.MILLISECONDS);
}

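Several warn calls above, such as the [DISTRO-STATUS-CLEAN] one, deliberately keep the exception as a trailing argument after a concatenated message. With an SLF4J 1.6+ binding, a trailing throwable that has no matching placeholder is logged with its full stack trace; if a placeholder consumes it, only its toString() appears. A sketch of the difference, names illustrative:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ThrowableLastDemo {
    private static final Logger SRV_LOG = LoggerFactory.getLogger("naming-server");

    public static void main(String[] args) {
        String serverIP = "10.0.0.1";
        Exception e = new RuntimeException("connection refused");

        // One placeholder, two arguments: the unmatched trailing throwable
        // is treated as the exception, so the stack trace is printed.
        SRV_LOG.warn("Failed to request to clean server status to {}", serverIP, e);

        // Two placeholders, two arguments: the throwable fills the second
        // placeholder as text and the stack trace is lost.
        SRV_LOG.warn("Failed to clean {}, error: {}", serverIP, e);
    }
}
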
View File

@ -93,18 +93,18 @@ public class DomainsManager {
try {
TimeUnit.SECONDS.sleep(5);
} catch (InterruptedException e) {
Loggers.SRV_LOG.error("AUTO-INIT", "failed to auto init", e);
Loggers.SRV_LOG.error("[AUTO-INIT] failed to auto init", e);
}
try {
leader = RaftCore.getPeerSet().getLeader();
if (leader != null) {
Loggers.SRV_LOG.info("[AUTO-INIT] leader is: " + leader.ip);
Loggers.SRV_LOG.info("[AUTO-INIT] leader is: {}", leader.ip);
break;
}
} catch (Throwable throwable) {
Loggers.SRV_LOG.error("AUTO-INIT", "failed to auto init", throwable);
Loggers.SRV_LOG.error("[AUTO-INIT] failed to auto init", throwable);
}
}
@ -118,7 +118,7 @@ public class DomainsManager {
} catch (Exception e) {
toBeUpdatedDomsQueue.poll();
toBeUpdatedDomsQueue.add(new DomainKey(namespaceId, domName, serverIP, checksum));
Loggers.SRV_LOG.error("DOMAIN-STATUS", "Failed to add domain to be updatd to queue.", e);
Loggers.SRV_LOG.error("[DOMAIN-STATUS] Failed to add domain to be updatd to queue.", e);
} finally {
lock.unlock();
}
@ -139,7 +139,7 @@ public class DomainsManager {
try {
domainKey = toBeUpdatedDomsQueue.take();
} catch (Exception e) {
Loggers.EVT_LOG.error("UPDATE-DOMAIN", "Exception while taking item from LinkedBlockingDeque.");
Loggers.EVT_LOG.error("[UPDATE-DOMAIN] Exception while taking item from LinkedBlockingDeque.");
}
if (domainKey == null) {
@ -152,7 +152,7 @@ public class DomainsManager {
domainUpdateExecutor.execute(new DomUpdater(domainKey.getNamespaceId(), domName, serverIP));
}
} catch (Exception e) {
Loggers.EVT_LOG.error("UPDATE-DOMAIN", "Exception while update dom: " + domName + "from " + serverIP, e);
Loggers.EVT_LOG.error("[UPDATE-DOMAIN] Exception while update dom: {} from {}, error: {}", domName, serverIP, e);
}
}
}
@ -174,7 +174,8 @@ public class DomainsManager {
try {
updatedDom2(namespaceId, domName, serverIP);
} catch (Exception e) {
Loggers.SRV_LOG.warn("DOMAIN-UPDATER", "Exception while update dom: " + domName + "from " + serverIP, e);
Loggers.SRV_LOG.warn("[DOMAIN-UPDATER] Exception while update dom: {} from {}, error: {}",
domName, serverIP, e);
}
}
}
@ -204,9 +205,9 @@ public class DomainsManager {
Boolean valid = Boolean.parseBoolean(ipsMap.get(ipAddress.toIPAddr()));
if (valid != ipAddress.isValid()) {
ipAddress.setValid(valid);
Loggers.EVT_LOG.info("{" + domName + "} {SYNC} " +
"{IP-" + (ipAddress.isValid() ? "ENABLED" : "DISABLED") + "} " + ipAddress.getIp()
+ ":" + ipAddress.getPort() + "@" + ipAddress.getClusterName());
Loggers.EVT_LOG.info("{} {SYNC} IP-{} : {}@{}",
domName, (ipAddress.isValid() ? "ENABLED" : "DISABLED"),
ipAddress.getIp(), ipAddress.getPort(), ipAddress.getClusterName());
}
}
@ -217,7 +218,7 @@ public class DomainsManager {
stringBuilder.append(ipAddress.toIPAddr()).append("_").append(ipAddress.isValid()).append(",");
}
Loggers.EVT_LOG.info("[IP-UPDATED] dom: " + raftVirtualClusterDomain.getName() + ", ips: " + stringBuilder.toString());
Loggers.EVT_LOG.info("[IP-UPDATED] dom: {}, ips: {}", raftVirtualClusterDomain.getName(), stringBuilder.toString());
}
@ -257,7 +258,7 @@ public class DomainsManager {
for (String namespaceId : serviceMap.keySet()) {
for (Map.Entry<String, Domain> entry : serviceMap.get(namespaceId).entrySet()) {
if (DistroMapper.responsible(entry.getKey())) {
domCount ++;
domCount++;
}
}
}
@ -329,8 +330,8 @@ public class DomainsManager {
Cluster cluster = new Cluster(ipAddress.getClusterName());
cluster.setDom(dom);
dom.getClusterMap().put(ipAddress.getClusterName(), cluster);
Loggers.SRV_LOG.warn("cluster: " + ipAddress.getClusterName() + " not found, ip: " + ipAddress.toJSON()
+ ", will create new cluster with default configuration.");
Loggers.SRV_LOG.warn("cluster: {} not found, ip: {}, will create new cluster with default configuration.",
ipAddress.getClusterName(), ipAddress.toJSON());
}
ipAddressMap.put(ipAddress.getDatumKey(), ipAddress);
@ -374,7 +375,7 @@ public class DomainsManager {
}
}
} catch (Throwable throwable) {
Loggers.RAFT.error("NA", "error while processing json: " + oldJson, throwable);
Loggers.RAFT.error("error while processing json: " + oldJson, throwable);
} finally {
if (ipAddresses == null) {
ipAddresses = new ArrayList<>();
@ -517,7 +518,8 @@ public class DomainsManager {
public void addItem(String domName, String checksum) {
if (StringUtils.isEmpty(domName) || StringUtils.isEmpty(checksum)) {
Loggers.SRV_LOG.warn("DOMAIN-CHECKSUM", "domName or checksum is empty,domName: " + domName + " checksum: " + checksum);
Loggers.SRV_LOG.warn("[DOMAIN-CHECKSUM] domName or checksum is empty,domName: {}, checksum: {}",
domName, checksum);
return;
}
@ -531,7 +533,6 @@ public class DomainsManager {
public void run() {
try {
Map<String, Set<String>> allDomainNames = getAllDomNames();
if (allDomainNames.size() <= 0) {
@ -577,7 +578,7 @@ public class DomainsManager {
}
}
} catch (Exception e) {
Loggers.SRV_LOG.error("DOMAIN-STATUS", "Exception while sending domain status: ", e);
Loggers.SRV_LOG.error("[DOMAIN-STATUS] Exception while sending domain status", e);
} finally {
UtilsAndCommons.DOMAIN_SYNCHRONIZATION_EXECUTOR.schedule(this, Switch.getDomStatusSynchronizationPeriodMillis(), TimeUnit.MILLISECONDS);
}
@ -623,7 +624,7 @@ public class DomainsManager {
public void onChange(String key, String value) throws Exception {
try {
if (StringUtils.isEmpty(value)) {
Loggers.SRV_LOG.warn("received empty push from raft, key=" + key);
Loggers.SRV_LOG.warn("received empty push from raft, key: {}", key);
return;
}
@ -636,7 +637,7 @@ public class DomainsManager {
dom.setNamespaceId(UtilsAndCommons.getDefaultNamespaceId());
}
Loggers.RAFT.info("[RAFT-NOTIFIER] datum is changed, key:" + key + ", value:" + value);
Loggers.RAFT.info("[RAFT-NOTIFIER] datum is changed, key: {}, value: {}", key, value);
Domain oldDom = getDomain(dom.getNamespaceId(), dom.getName());
@ -648,13 +649,13 @@ public class DomainsManager {
putDomain(dom);
dom.init();
Loggers.SRV_LOG.info("[NEW-DOM-raft] " + dom.toJSON());
Loggers.SRV_LOG.info("[NEW-DOM-RAFT] {}", dom.toJSON());
}
wakeUp(UtilsAndCommons.assembleFullServiceName(dom.getNamespaceId(), dom.getName()));
} catch (Throwable e) {
Loggers.SRV_LOG.error("VIPSRV-DOM", "error while processing dom update", e);
Loggers.SRV_LOG.error("[NACOS-DOM] error while processing dom update", e);
}
}
@ -664,11 +665,11 @@ public class DomainsManager {
String namespace = domKey.split(UtilsAndCommons.SERVICE_GROUP_CONNECTOR)[0];
String name = domKey.split(UtilsAndCommons.SERVICE_GROUP_CONNECTOR)[1];
Domain dom = chooseDomMap(namespace).remove(name);
Loggers.RAFT.info("[RAFT-NOTIFIER] datum is deleted, key:" + key + ", value:" + value);
Loggers.RAFT.info("[RAFT-NOTIFIER] datum is deleted, key: {}, value: {}", key, value);
if (dom != null) {
dom.destroy();
Loggers.SRV_LOG.info("[DEAD-DOM] " + dom.toJSON());
Loggers.SRV_LOG.info("[DEAD-DOM] {}", dom.toJSON());
}
}
};

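Both synchronizer loops in this file end with schedule(...) in a finally block instead of using scheduleAtFixedRate, so the next round still runs after a failure and the period is re-read each round (it comes from a runtime Switch). A minimal sketch of that pattern, with a constant standing in for Switch.getDomStatusSynchronizationPeriodMillis():

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class SelfReschedulingTask implements Runnable {

    private static final ScheduledExecutorService EXECUTOR =
        Executors.newSingleThreadScheduledExecutor();

    @Override
    public void run() {
        try {
            // one round of domain status synchronization would go here
        } catch (Exception e) {
            // log and swallow; the finally block still queues the next round
        } finally {
            // re-reading the period every round lets a config switch take
            // effect without restarting the task
            EXECUTOR.schedule(this, getPeriodMillis(), TimeUnit.MILLISECONDS);
        }
    }

    private long getPeriodMillis() {
        return 2000L; // stand-in for Switch.getDomStatusSynchronizationPeriodMillis()
    }

    public static void main(String[] args) {
        EXECUTOR.schedule(new SelfReschedulingTask(), 0, TimeUnit.MILLISECONDS);
    }
}
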
View File

@ -301,7 +301,7 @@ public class IpAddress extends Instance implements Comparable {
@Override
public int compareTo(Object o) {
if (!(o instanceof IpAddress)) {
Loggers.SRV_LOG.error("IPADDRESS-COMPARE", "Object is not an instance of IPAdress,object: " + o.getClass());
Loggers.SRV_LOG.error("[IPADDRESS-COMPARE] Object is not an instance of IPAdress, object: {}", o.getClass());
throw new IllegalArgumentException("Object is not an instance of IPAdress,object: " + o.getClass());
}

View File

@ -68,7 +68,7 @@ public class VirtualClusterDomain implements Domain, RaftListener {
private String namespaceId;
/**
* IP will be deleted if it has not send beat for some time, default timeout is half an hour .
* IP will be deleted if it has not sent a beat for some time, default timeout is 30 seconds.
*/
private long ipDeleteTimeout = 30 * 1000;
@ -172,11 +172,11 @@ public class VirtualClusterDomain implements Domain, RaftListener {
public void onChange(String key, String value) throws Exception {
if (StringUtils.isEmpty(value)) {
Loggers.SRV_LOG.warn("[VIPSRV-DOM] received empty iplist config for dom: " + name);
Loggers.SRV_LOG.warn("[NACOS-DOM] received empty iplist config for dom: {}", name);
return;
}
Loggers.RAFT.info("[VIPSRV-RAFT] datum is changed, key: " + key + ", value: " + value);
Loggers.RAFT.info("[NACOS-RAFT] datum is changed, key: {}, value: {}", key, value);
List<IpAddress> ips = JSON.parseObject(value, new TypeReference<List<IpAddress>>() {
});
@ -214,7 +214,7 @@ public class VirtualClusterDomain implements Domain, RaftListener {
for (IpAddress ip : ips) {
try {
if (ip == null) {
Loggers.SRV_LOG.error("VIPSRV-DOM", "received malformed ip");
Loggers.SRV_LOG.error("[NACOS-DOM] received malformed ip: null");
continue;
}
@ -224,7 +224,7 @@ public class VirtualClusterDomain implements Domain, RaftListener {
// put wild ip into DEFAULT cluster
if (!clusterMap.containsKey(ip.getClusterName())) {
Loggers.SRV_LOG.warn("cluster of IP not found: " + ip.toJSON());
Loggers.SRV_LOG.warn("cluster of IP not found: {}", ip.toJSON());
continue;
}
@ -236,7 +236,7 @@ public class VirtualClusterDomain implements Domain, RaftListener {
clusterIPs.add(ip);
} catch (Exception e) {
Loggers.SRV_LOG.error("VIPSRV-DOM", "failed to process ip: " + ip, e);
Loggers.SRV_LOG.error("[NACOS-DOM] failed to process ip: " + ip, e);
}
}
@ -253,7 +253,7 @@ public class VirtualClusterDomain implements Domain, RaftListener {
stringBuilder.append(ipAddress.toIPAddr()).append("_").append(ipAddress.isValid()).append(",");
}
Loggers.EVT_LOG.info("[IP-UPDATED] dom: " + getName() + ", ips: " + stringBuilder.toString());
Loggers.EVT_LOG.info("[IP-UPDATED] dom: {}, ips: {}", getName(), stringBuilder.toString());
}
@ -346,7 +346,7 @@ public class VirtualClusterDomain implements Domain, RaftListener {
return vDom;
} catch (Exception e) {
Loggers.SRV_LOG.error("VIPSRV-DOM", "parse cluster json error, " + e.toString() + ", content=" + json, e);
Loggers.SRV_LOG.error("[NACOS-DOM] parse cluster json content: {}, error: {}", json, e);
return null;
}
}
@ -470,42 +470,42 @@ public class VirtualClusterDomain implements Domain, RaftListener {
VirtualClusterDomain vDom = (VirtualClusterDomain) dom;
if (!StringUtils.equals(token, vDom.getToken())) {
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ",token" + token + " -> " + vDom.getToken());
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, token: {} -> {}", name, token, vDom.getToken());
token = vDom.getToken();
}
if (!ListUtils.isEqualList(owners, vDom.getOwners())) {
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ",owners: " + owners + " -> " + vDom.getToken());
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, owners: {} -> {}", name, owners, vDom.getOwners());
owners = vDom.getOwners();
}
if (protectThreshold != vDom.getProtectThreshold()) {
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ",protectThreshold: " + protectThreshold + " -> " + vDom.getProtectThreshold());
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, protectThreshold: {} -> {}", name, protectThreshold, vDom.getProtectThreshold());
protectThreshold = vDom.getProtectThreshold();
}
if (useSpecifiedURL != vDom.isUseSpecifiedURL()) {
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ",useSpecifiedURL: " + useSpecifiedURL + " -> " + vDom.isUseSpecifiedURL());
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, useSpecifiedURL: {} -> {}", name, useSpecifiedURL, vDom.isUseSpecifiedURL());
useSpecifiedURL = vDom.isUseSpecifiedURL();
}
if (resetWeight != vDom.getResetWeight().booleanValue()) {
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ",resetWeight: " + resetWeight + " -> " + vDom.getResetWeight());
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, resetWeight: {} -> {}", name, resetWeight, vDom.getResetWeight());
resetWeight = vDom.getResetWeight();
}
if (enableHealthCheck != vDom.getEnableHealthCheck().booleanValue()) {
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ", enableHealthCheck: " + enableHealthCheck + " -> " + vDom.getEnableHealthCheck());
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, enableHealthCheck: {} -> {}", name, enableHealthCheck, vDom.getEnableHealthCheck());
enableHealthCheck = vDom.getEnableHealthCheck();
}
if (enableClientBeat != vDom.getEnableClientBeat().booleanValue()) {
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ", enableClientBeat: " + enableClientBeat + " -> " + vDom.getEnableClientBeat());
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, enableClientBeat: {} -> {}", name, enableClientBeat, vDom.getEnableClientBeat());
enableClientBeat = vDom.getEnableClientBeat();
}
if (enabled != vDom.getEnabled().booleanValue()) {
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ", enabled: " + enabled + " -> " + vDom.getEnabled());
Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, enabled: {} -> {}", name, enabled, vDom.getEnabled());
enabled = vDom.getEnabled();
}
@ -552,13 +552,13 @@ public class VirtualClusterDomain implements Domain, RaftListener {
MessageDigest md5 = MessageDigest.getInstance("MD5");
result = new BigInteger(1, md5.digest((ipsString.toString()).getBytes(Charset.forName("UTF-8")))).toString(16);
} catch (Exception e) {
Loggers.SRV_LOG.error("VIPSRV-DOM", "error while calculating checksum(md5)", e);
Loggers.SRV_LOG.error("[NACOS-DOM] error while calculating checksum(md5)", e);
result = RandomStringUtils.randomAscii(32);
}
checksum = result;
} catch (Exception e) {
Loggers.SRV_LOG.error("VIPSRV-DOM", "error while calculating checksum(md5)", e);
Loggers.SRV_LOG.error("[NACOS-DOM] error while calculating checksum(md5)", e);
checksum = RandomStringUtils.randomAscii(32);
}
}

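For reference, the checksum logic touched above hashes the serialized IP list with MD5 and renders the digest as hex via BigInteger; on failure it falls back to a random string so checksum comparisons still trigger a sync. A self-contained sketch, with the commons-lang RandomStringUtils fallback replaced by a constant:

import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public class ChecksumSketch {

    public static String checksum(String ipsString) {
        try {
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            // BigInteger(1, ...) forces a positive value; toString(16)
            // renders the digest as hex (leading zeros are dropped)
            return new BigInteger(1, md5.digest(ipsString.getBytes(StandardCharsets.UTF_8))).toString(16);
        } catch (Exception e) {
            return "random-fallback"; // stand-in for RandomStringUtils.randomAscii(32)
        }
    }

    public static void main(String[] args) {
        System.out.println(checksum("1.1.1.1:80_true,"));
    }
}
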
View File

@ -76,7 +76,7 @@ public abstract class AbstractHealthCheckProcessor {
}
if (!healthCheckResults.offer(result)) {
Loggers.EVT_LOG.warn("HEALTH-CHECK-SYNC", "failed to add check result to queue, queue size: " + healthCheckResults.size());
Loggers.EVT_LOG.warn("[HEALTH-CHECK-SYNC] failed to add check result to queue, queue size: {}", healthCheckResults.size());
}
}
@ -110,17 +110,20 @@ public abstract class AbstractHealthCheckProcessor {
}
Map<String, String> params = new HashMap<>(10);
params.put("result", JSON.toJSONString(list));
Loggers.DEBUG_LOG.debug("[HEALTH-SYNC]" + server + ", " + JSON.toJSONString(list));
if (Loggers.DEBUG_LOG.isDebugEnabled()) {
Loggers.DEBUG_LOG.debug("[HEALTH-SYNC] server: {}, healthCheckResults: {}",
server, JSON.toJSONString(list));
}
if (!server.contains(":")) {
server = server + ":" + RunningConfig.getServerPort();
}
HttpClient.HttpResult httpResult = HttpClient.httpPost("http://" + server
+ RunningConfig.getContextPath() + UtilsAndCommons.NACOS_NAMING_CONTEXT
+ "/api/healthCheckResult", null, params);
+ RunningConfig.getContextPath() + UtilsAndCommons.NACOS_NAMING_CONTEXT
+ "/api/healthCheckResult", null, params);
if (httpResult.code != HttpURLConnection.HTTP_OK) {
Loggers.EVT_LOG.warn("HEALTH-CHECK-SYNC", "failed to send result to " + server
+ ", result: " + JSON.toJSONString(list));
Loggers.EVT_LOG.warn("[HEALTH-CHECK-SYNC] failed to send result to {}, result: {}",
server, JSON.toJSONString(list));
}
}
@ -215,25 +218,22 @@ public abstract class AbstractHealthCheckProcessor {
PushService.domChanged(vDom.getNamespaceId(), vDom.getName());
addResult(new HealthCheckResult(vDom.getName(), ip));
Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {POS} {IP-ENABLED} valid: "
+ ip.getIp() + ":" + ip.getPort() + "@" + cluster.getName()
+ ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + msg);
Loggers.EVT_LOG.info("dom: {} {POS} {IP-ENABLED} valid: {}:{}@{}, region: {}, msg: {}",
cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), DistroMapper.LOCALHOST_SITE, msg);
} else {
if (!ip.isMockValid()) {
ip.setMockValid(true);
Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {PROBE} {IP-ENABLED} valid: "
+ ip.getIp() + ":" + ip.getPort() + "@" + cluster.getName()
+ ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + msg);
Loggers.EVT_LOG.info("dom: {} {PROBE} {IP-ENABLED} valid: {}:{}@{}, region: {}, msg: {}",
cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), DistroMapper.LOCALHOST_SITE, msg);
}
}
} else {
Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {OTHER} " +
"{IP-ENABLED} pre-valid: " + ip.getIp() + ":" + ip.getPort() + "@"
+ cluster.getName() + " in " + ip.getOKCount() + ", msg: " + msg);
Loggers.EVT_LOG.info("dom: {} {OTHER} {IP-ENABLED} pre-valid: {}:{}@{} in {}, msg: {}",
cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), ip.getOKCount(), msg);
}
}
} catch (Throwable t) {
Loggers.SRV_LOG.error("CHECK-OK", "error when close check task.", t);
Loggers.SRV_LOG.error("[CHECK-OK] error when close check task.", t);
}
ip.getFailCount().set(0);
@ -256,23 +256,20 @@ public abstract class AbstractHealthCheckProcessor {
PushService.domChanged(vDom.getNamespaceId(), vDom.getName());
Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {POS} {IP-DISABLED} invalid: "
+ ip.getIp() + ":" + ip.getPort() + "@" + cluster.getName()
+ ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + msg);
Loggers.EVT_LOG.info("dom: {} {POS} {IP-DISABLED} invalid: {}:{}@{}, region: {}, msg: {}",
cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), DistroMapper.LOCALHOST_SITE, msg);
} else {
Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {PROBE} {IP-DISABLED} invalid: "
+ ip.getIp() + ":" + ip.getPort() + "@" + cluster.getName()
+ ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + msg);
Loggers.EVT_LOG.info("dom: {} {PROBE} {IP-DISABLED} invalid: {}:{}@{}, region: {}, msg: {}",
cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), DistroMapper.LOCALHOST_SITE, msg);
}
} else {
Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {OTHER} " +
"{IP-DISABLED} pre-invalid: " + ip.getIp() + ":" + ip.getPort()
+ "@" + cluster.getName() + " in " + ip.getFailCount() + ", msg: " + msg);
Loggers.EVT_LOG.info("dom: {} {OTHER} {IP-DISABLED} pre-invalid: {}:{}@{} in {}, msg: {}",
cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), ip.getFailCount(), msg);
}
}
} catch (Throwable t) {
Loggers.SRV_LOG.error("CHECK-FAIL", "error when close check task.", t);
Loggers.SRV_LOG.error("[CHECK-FAIL] error when close check task.", t);
}
ip.getOKCount().set(0);
@ -294,21 +291,19 @@ public abstract class AbstractHealthCheckProcessor {
PushService.domChanged(vDom.getNamespaceId(), vDom.getName());
addResult(new HealthCheckResult(vDom.getName(), ip));
Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {POS} {IP-DISABLED} invalid-now: "
+ ip.getIp() + ":" + ip.getPort() + "@" + cluster.getName()
+ ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + msg);
Loggers.EVT_LOG.info("dom: {} {POS} {IP-DISABLED} invalid-now: {}:{}@{}, region: {}, msg: {}",
cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), DistroMapper.LOCALHOST_SITE, msg);
} else {
if (ip.isMockValid()) {
ip.setMockValid(false);
Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {PROBE} {IP-DISABLED} invalid-now: "
+ ip.getIp() + ":" + ip.getPort() + "@" + cluster.getName()
+ ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + msg);
Loggers.EVT_LOG.info("dom: {} {PROBE} {IP-DISABLED} invalid-now: {}:{}@{}, region: {}, msg: {}",
cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), DistroMapper.LOCALHOST_SITE, msg);
}
}
}
} catch (Throwable t) {
Loggers.SRV_LOG.error("CHECK-FAIL-NOW", "error when close check task.", t);
Loggers.SRV_LOG.error("[CHECK-FAIL-NOW] error when close check task.", t);
}
ip.getOKCount().set(0);

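The isDebugEnabled() guard added above is still worthwhile with placeholders: {} defers string formatting, but the arguments themselves, here a JSON serialization of the result list, are evaluated before the call. A sketch, with toString() standing in for JSON.toJSONString:

import java.util.Arrays;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DebugGuardDemo {
    private static final Logger DEBUG_LOG = LoggerFactory.getLogger("naming-debug");

    public static void main(String[] args) {
        List<String> list = Arrays.asList("result-1", "result-2");
        String server = "10.0.0.1:8848";

        // Without the guard, list.toString() (or a JSON serialization) runs
        // even when DEBUG is off; the guard skips that work entirely.
        if (DEBUG_LOG.isDebugEnabled()) {
            DEBUG_LOG.debug("[HEALTH-SYNC] server: {}, healthCheckResults: {}",
                server, list.toString());
        }
    }
}
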
View File

@ -56,10 +56,9 @@ public class ClientBeatCheckTask implements Runnable {
if (!ipAddress.isMarked()) {
if (ipAddress.isValid()) {
ipAddress.setValid(false);
Loggers.EVT_LOG.info("{" + ipAddress.getClusterName() + "} {POS} {IP-DISABLED} valid: "
+ ipAddress.getIp() + ":" + ipAddress.getPort() + "@" + ipAddress.getClusterName()
+ ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + "client timeout after "
+ ClientBeatProcessor.CLIENT_BEAT_TIMEOUT + ", last beat: " + ipAddress.getLastBeat());
Loggers.EVT_LOG.info("{POS} {IP-DISABLED} valid: {}:{}@{}, region: {}, msg: client timeout after {}, last beat: {}",
ipAddress.getIp(), ipAddress.getPort(), ipAddress.getClusterName(),
DistroMapper.LOCALHOST_SITE, ClientBeatProcessor.CLIENT_BEAT_TIMEOUT, ipAddress.getLastBeat());
PushService.domChanged(domain.getNamespaceId(), domain.getName());
}
}
@ -67,16 +66,12 @@ public class ClientBeatCheckTask implements Runnable {
if (System.currentTimeMillis() - ipAddress.getLastBeat() > domain.getIpDeleteTimeout()) {
// delete ip
// if (domain.allIPs().size() > 1) {
Loggers.SRV_LOG.info("[AUTO-DELETE-IP] dom: " + domain.getName() + ", ip: " + JSON.toJSONString(ipAddress));
Loggers.SRV_LOG.info("[AUTO-DELETE-IP] dom: {}, ip: {}", domain.getName(), JSON.toJSONString(ipAddress));
deleteIP(ipAddress);
// }
}
}
} catch (Exception e) {
Loggers.SRV_LOG.warn("Exception while processing client beat time out.", e);
} finally {
// HealthCheckReactor.scheduleCheck(this);
}
}
@ -84,17 +79,17 @@ public class ClientBeatCheckTask implements Runnable {
private void deleteIP(IpAddress ipAddress) {
try {
String ipList = ipAddress.getIp() + ":" + ipAddress.getPort() + "_"
+ ipAddress.getWeight() + "_" + ipAddress.getClusterName();
+ ipAddress.getWeight() + "_" + ipAddress.getClusterName();
String url = "http://127.0.0.1:" + RunningConfig.getServerPort() + RunningConfig.getContextPath()
+ UtilsAndCommons.NACOS_NAMING_CONTEXT + "/api/remvIP4Dom?dom="
+ domain.getName() + "&ipList=" + ipList + "&token=" + domain.getToken();
+ UtilsAndCommons.NACOS_NAMING_CONTEXT + "/api/remvIP4Dom?dom="
+ domain.getName() + "&ipList=" + ipList + "&token=" + domain.getToken();
HttpClient.HttpResult result = HttpClient.httpGet(url, null, null);
if (result.code != HttpURLConnection.HTTP_OK) {
Loggers.SRV_LOG.error("IP-DEAD", "failed to delete ip automatically, ip: "
+ ipAddress.toJSON() + ", caused " + result.content + ",resp code: " + result.code);
Loggers.SRV_LOG.error("[IP-DEAD] failed to delete ip automatically, ip: {}, caused {}, resp code: {}",
ipAddress.toJSON(), result.content, result.code);
}
} catch (Exception e) {
Loggers.SRV_LOG.error("IP-DEAD", "failed to delete ip automatically, ip: " + ipAddress.toJSON(), e);
Loggers.SRV_LOG.error("[IP-DEAD] failed to delete ip automatically, ip: {}, error: {}", ipAddress.toJSON(), e);
}
}

View File

@ -61,7 +61,7 @@ public class ClientBeatProcessor implements Runnable {
return;
}
Loggers.EVT_LOG.debug("[CLIENT-BEAT] processing beat: " + rsInfo.toString());
Loggers.EVT_LOG.debug("[CLIENT-BEAT] processing beat: {}", rsInfo.toString());
String ip = rsInfo.getIp();
String clusterName = rsInfo.getCluster();
@ -69,19 +69,15 @@ public class ClientBeatProcessor implements Runnable {
Cluster cluster = virtualClusterDomain.getClusterMap().get(clusterName);
List<IpAddress> ipAddresses = cluster.allIPs();
boolean processed = false;
for (IpAddress ipAddress: ipAddresses) {
if (ipAddress.getIp().equals(ip) && ipAddress.getPort() == port) {
processed = true;
Loggers.EVT_LOG.debug("[CLIENT-BEAT] refresh beat: " + rsInfo.toString());
Loggers.EVT_LOG.debug("[CLIENT-BEAT] refresh beat: {}", rsInfo.toString());
ipAddress.setLastBeat(System.currentTimeMillis());
if (!ipAddress.isMarked()) {
if (!ipAddress.isValid()) {
ipAddress.setValid(true);
Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {POS} {IP-ENABLED} valid: "
+ ip+ ":" + port+ "@" + cluster.getName()
+ ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + "client beat ok");
Loggers.EVT_LOG.info("dom: {} {POS} {IP-ENABLED} valid: {}:{}@{}, region: {}, msg: client beat ok",
cluster.getDom().getName(), ip, port, cluster.getName(), DistroMapper.LOCALHOST_SITE);
PushService.domChanged(virtualClusterDomain.getNamespaceId(), domain.getName());
}
}

View File

@ -62,7 +62,7 @@ public class HealthCheckReactor {
try {
scheduledFuture.cancel(true);
} catch (Exception e) {
Loggers.EVT_LOG.error("CANCEL-CHECK", "cancel failed!", e);
Loggers.EVT_LOG.error("[CANCEL-CHECK] cancel failed!", e);
}
}

View File

@ -64,7 +64,7 @@ public class HealthCheckStatus {
+ clusterName + ":"
+ datumKey;
} catch (Throwable e) {
Loggers.SRV_LOG.error("BUILD-KEY", "Exception while set rt, ip " + ip.toJSON(), e);
Loggers.SRV_LOG.error("[BUILD-KEY] Exception while set rt, ip {}, error: {}", ip.toJSON(), e);
}
return ip.getDefaultKey();

View File

@ -31,7 +31,7 @@ public class HealthCheckTask implements Runnable {
private long checkRTNormalized = -1;
private long checkRTBest = -1;
private long checkRTWorst= -1;
private long checkRTWorst = -1;
private long checkRTLast = -1;
private long checkRTLastLast = -1;
@ -55,38 +55,36 @@ public class HealthCheckTask implements Runnable {
@Override
public void run() {
AbstractHealthCheckProcessor processor = AbstractHealthCheckProcessor.getProcessor(cluster.getHealthChecker());
AbstractHealthCheckProcessor processor = AbstractHealthCheckProcessor.getProcessor(cluster.getHealthChecker());
try {
if (DistroMapper.responsible(cluster.getDom().getName())) {
processor.process(this);
Loggers.EVT_LOG.debug("[HEALTH-CHECK] schedule health check task: " + cluster.getDom().getName());
Loggers.EVT_LOG.debug("[HEALTH-CHECK] schedule health check task: {}", cluster.getDom().getName());
}
} catch (Throwable e) {
Loggers.SRV_LOG.error("VIPSRV-HEALTH-CHECK", "error while process health check for " + cluster.getDom().getName() + ":" + cluster.getName(), e);
Loggers.SRV_LOG.error("[HEALTH-CHECK] error while process health check for {}:{}, error: {}",
cluster.getDom().getName(), cluster.getName(), e);
} finally {
if (!cancelled) {
HealthCheckReactor.scheduleCheck(this);
// worst == 0 means never checked
if (this.getCheckRTWorst() > 0
&& Switch.isHealthCheckEnabled(cluster.getDom().getName())
&& DistroMapper.responsible(cluster.getDom().getName())) {
&& Switch.isHealthCheckEnabled(cluster.getDom().getName())
&& DistroMapper.responsible(cluster.getDom().getName())) {
// TLog doesn't support float so we must convert it into long
long diff = ((this.getCheckRTLast() - this.getCheckRTLastLast()) * 10000)
/ this.getCheckRTLastLast();
/ this.getCheckRTLastLast();
this.setCheckRTLastLast(this.getCheckRTLast());
Cluster cluster = this.getCluster();
if (((VirtualClusterDomain)cluster.getDom()).getEnableHealthCheck()) {
Loggers.CHECK_RT.info(cluster.getDom().getName() + ":" + cluster.getName()
+ "@" + processor.getType()
+ "->normalized: " + this.getCheckRTNormalized()
+ ", worst: " + this.getCheckRTWorst()
+ ", best: " + this.getCheckRTBest()
+ ", last: " + this.getCheckRTLast()
+ ", diff: " + diff);
if (((VirtualClusterDomain) cluster.getDom()).getEnableHealthCheck()) {
Loggers.CHECK_RT.info("{}:{}@{}->normalized: {}, worst: {}, best: {}, last: {}, diff: {}",
cluster.getDom().getName(), cluster.getName(), processor.getType(),
this.getCheckRTNormalized(), this.getCheckRTWorst(), this.getCheckRTBest(),
this.getCheckRTLast(), diff);
}
}
}

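The diff computed in the finally block above uses the scaled-integer trick the comment mentions: TLog cannot log floats, so the relative change in check round-trip time is multiplied by 10000 before the integer division. Worked numbers under assumed RT values:

public class FixedPointDiffDemo {
    public static void main(String[] args) {
        long checkRTLast = 37;     // ms, latest check round-trip (assumed)
        long checkRTLastLast = 35; // ms, previous round-trip (assumed)

        // (37 - 35) * 10000 / 35 = 571, i.e. roughly a +5.71% change,
        // preserved as a long because the scaling happens before dividing
        long diff = ((checkRTLast - checkRTLastLast) * 10000) / checkRTLastLast;
        System.out.println(diff); // prints 571
    }
}
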
View File

@ -64,7 +64,7 @@ public class HttpHealthCheckProcessor extends AbstractHealthCheckProcessor {
builder.setUserAgent("VIPServer");
asyncHttpClient = new AsyncHttpClient(builder.build());
} catch (Throwable e) {
SRV_LOG.error("VIPSRV-HEALTH-CHECK", "Error while constructing HTTP asynchronous client, " + e.toString(), e);
SRV_LOG.error("[HEALTH-CHECK] Error while constructing HTTP asynchronous client", e);
}
}
@ -93,16 +93,14 @@ public class HttpHealthCheckProcessor extends AbstractHealthCheckProcessor {
if (ip.isMarked()) {
if (SRV_LOG.isDebugEnabled()) {
SRV_LOG.debug("http check, ip is marked as to skip health check, ip:" + ip.getIp());
SRV_LOG.debug("http check, ip is marked as to skip health check, ip: {}" + ip.getIp());
}
continue;
}
if (!ip.markChecking()) {
SRV_LOG.warn("http check started before last one finished, dom: "
+ task.getCluster().getDom().getName() + ":"
+ task.getCluster().getName() + ":"
+ ip.getIp());
SRV_LOG.warn("http check started before last one finished, dom: {}:{}:{}",
task.getCluster().getDom().getName(), task.getCluster().getName(), ip.getIp());
reEvaluateCheckRT(task.getCheckRTNormalized() * 2, task, Switch.getHttpHealthParams());
continue;

View File

@ -95,16 +95,14 @@ public class MysqlHealthCheckProcessor extends AbstractHealthCheckProcessor {
if (ip.isMarked()) {
if (SRV_LOG.isDebugEnabled()) {
SRV_LOG.debug("mysql check, ip is marked as to skip health check, ip:" + ip.getIp());
SRV_LOG.debug("mysql check, ip is marked as to skip health check, ip: {}", ip.getIp());
}
continue;
}
if (!ip.markChecking()) {
SRV_LOG.warn("mysql check started before last one finished, dom: "
+ task.getCluster().getDom().getName() + ":"
+ task.getCluster().getName() + ":"
+ ip.getIp());
SRV_LOG.warn("mysql check started before last one finished, dom: {}:{}:{}",
task.getCluster().getDom().getName(), task.getCluster().getName(), ip.getIp());
reEvaluateCheckRT(task.getCheckRTNormalized() * 2, task, Switch.getMysqlHealthParams());
continue;
@ -202,14 +200,14 @@ public class MysqlHealthCheckProcessor extends AbstractHealthCheckProcessor {
try {
statement.close();
} catch (SQLException e) {
Loggers.SRV_LOG.error("MYSQL-CHECK", "failed to close statement:" + statement, e);
Loggers.SRV_LOG.error("[MYSQL-CHECK] failed to close statement:" + statement, e);
}
}
if (resultSet != null) {
try {
resultSet.close();
} catch (SQLException e) {
Loggers.SRV_LOG.error("MYSQL-CHECK", "failed to close resultSet:" + resultSet, e);
Loggers.SRV_LOG.error("[MYSQL-CHECK] failed to close resultSet:" + resultSet, e);
}
}
}

View File

@ -167,7 +167,7 @@ public class TcpSuperSenseProcessor extends AbstractHealthCheckProcessor impleme
NIO_EXECUTOR.execute(new PostProcessor(key));
}
} catch (Throwable e) {
SRV_LOG.error("VIPSRV-HEALTH-CHECK", "error while processing NIO task", e);
SRV_LOG.error("[HEALTH-CHECK] error while processing NIO task", e);
}
}
}

View File

@ -53,7 +53,7 @@ public class DomainStatusSynchronizer implements Synchronizer {
@Override
public Integer onCompleted(Response response) throws Exception {
if (response.getStatusCode() != HttpURLConnection.HTTP_OK) {
Loggers.SRV_LOG.warn("STATUS-SYNCHRONIZE", "failed to request domStatus, remote server: " + serverIP);
Loggers.SRV_LOG.warn("[STATUS-SYNCHRONIZE] failed to request domStatus, remote server: {}", serverIP);
return 1;
}
@ -61,7 +61,7 @@ public class DomainStatusSynchronizer implements Synchronizer {
}
});
} catch (Exception e) {
Loggers.SRV_LOG.warn("STATUS-SYNCHRONIZE", "failed to request domStatus, remote server: " + serverIP, e);
Loggers.SRV_LOG.warn("[STATUS-SYNCHRONIZE] failed to request domStatus, remote server: " + serverIP, e);
}
}
@ -78,11 +78,10 @@ public class DomainStatusSynchronizer implements Synchronizer {
String result;
try {
Loggers.SRV_LOG.info("[STATUS-SYNCHRONIZE] sync dom status from: "
+ serverIP + ", dom: " + key);
Loggers.SRV_LOG.info("[STATUS-SYNCHRONIZE] sync dom status from: {}, dom: {}", serverIP, key);
result = NamingProxy.reqAPI("ip4Dom2", params, serverIP, false);
} catch (Exception e) {
Loggers.SRV_LOG.warn("STATUS-SYNCHRONIZE", "Failed to get domain status from " + serverIP, e);
Loggers.SRV_LOG.warn("[STATUS-SYNCHRONIZE] Failed to get domain status from " + serverIP, e);
return null;
}

View File

@ -112,7 +112,7 @@ public class HttpClient {
return getResult(conn);
} catch (Exception e) {
Loggers.SRV_LOG.warn("VIPSRV", "Exception while request: " + url + ", caused: " + e.getMessage());
Loggers.SRV_LOG.warn("[VIPSRV] Exception while request: {}, caused: {}", url, e);
return new HttpResult(500, e.toString(), Collections.<String, String>emptyMap());
} finally {
if (conn != null) {

View File

@ -80,7 +80,8 @@ public class NamingProxy {
if (System.currentTimeMillis() - lastSrvSiteRefreshTime > VIP_SRV_SITE_REF_INTER_MILLIS ||
!CollectionUtils.isEqualCollection(servers, lastServers)) {
if (!CollectionUtils.isEqualCollection(servers, lastServers)) {
Loggers.SRV_LOG.info("[REFRESH-SERVER-SITE] server list is changed, old: " + lastServers + ", new: " + servers);
Loggers.SRV_LOG.info("[REFRESH-SERVER-SITE] server list is changed, old: {}, new: {}",
lastServers, servers);
}
lastServers = servers;
@ -144,15 +145,21 @@ public class NamingProxy {
Loggers.SRV_LOG.warn("failed to get config: " + CLUSTER_CONF_FILE_PATH, e);
}
Loggers.DEBUG_LOG.debug("REFRESH-SERVER-LIST1", result);
if (Loggers.DEBUG_LOG.isDebugEnabled()) {
Loggers.DEBUG_LOG.debug("REFRESH-SERVER-LIST1 {}", result);
}
//use system env
if (CollectionUtils.isEmpty(result)) {
result = SystemUtils.getIPsBySystemEnv(UtilsAndCommons.SELF_SERVICE_CLUSTER_ENV);
Loggers.DEBUG_LOG.debug("REFRESH-SERVER-LIST4: " + result);
if (Loggers.DEBUG_LOG.isDebugEnabled()) {
Loggers.DEBUG_LOG.debug("REFRESH-SERVER-LIST4: {}", result);
}
}
Loggers.DEBUG_LOG.debug("REFRESH-SERVER-LIST2" + result);
if (Loggers.DEBUG_LOG.isDebugEnabled()) {
Loggers.DEBUG_LOG.debug("REFRESH-SERVER-LIST2 {}", result);
}
if (!result.isEmpty() && !result.get(0).contains(UtilsAndCommons.CLUSTER_CONF_IP_SPLITER)) {
for (int i = 0; i < result.size(); i++) {
@ -175,7 +182,9 @@ public class NamingProxy {
servers.put("sameSite", snapshot);
servers.put("otherSite", new ArrayList<String>());
Loggers.SRV_LOG.debug("sameSiteServers:" + servers.toString());
if (Loggers.SRV_LOG.isDebugEnabled()) {
Loggers.SRV_LOG.debug("sameSiteServers: {}", servers.toString());
}
return servers;
}

View File

@ -50,7 +50,8 @@ public class ServerStatusSynchronizer implements Synchronizer {
@Override
public Integer onCompleted(Response response) throws Exception {
if (response.getStatusCode() != HttpURLConnection.HTTP_OK) {
Loggers.SRV_LOG.warn("STATUS-SYNCHRONIZE", "failed to request serverStatus, remote server: " + serverIP);
Loggers.SRV_LOG.warn("[STATUS-SYNCHRONIZE] failed to request serverStatus, remote server: {}",
serverIP);
return 1;
}
@ -58,7 +59,7 @@ public class ServerStatusSynchronizer implements Synchronizer {
}
});
} catch (Exception e) {
Loggers.SRV_LOG.warn("STATUS-SYNCHRONIZE", "failed to request serverStatus, remote server: " + serverIP, e);
Loggers.SRV_LOG.warn("[STATUS-SYNCHRONIZE] failed to request serverStatus, remote server: " + serverIP, e);
}
}

View File

@ -58,7 +58,7 @@ public class Switch {
@Override
public void onChange(String key, String value) throws Exception {
Loggers.RAFT.info("[VIPSRV-RAFT] datum is changed, key: " + key + ", value: " + value);
Loggers.RAFT.info("[NACOS-RAFT] datum is changed, key: {}, value: {}", key, value);
if (StringUtils.isEmpty(value)) {
return;
}
@ -135,7 +135,7 @@ public class Switch {
try {
RaftCore.doSignalPublish(UtilsAndCommons.getDomStoreKey(dom), JSON.toJSONString(dom));
} catch (Exception e) {
Loggers.SRV_LOG.error("VIPSRV-SWITCH", "failed to save switch", e);
Loggers.SRV_LOG.error("[SWITCH] failed to save switch", e);
}
}

View File

@ -54,7 +54,7 @@ public class PerformanceLoggerThread {
}
private void freshHealthCheckSwitch() {
Loggers.SRV_LOG.info("[HEALTH-CHECK] health check is " + Switch.isHealthCheckEnabled());
Loggers.SRV_LOG.info("[HEALTH-CHECK] health check is {}", Switch.isHealthCheckEnabled());
}
class HealthCheckSwitchTask implements Runnable {
@ -85,9 +85,9 @@ public class PerformanceLoggerThread {
int ipCount = domainsManager.getInstanceCount();
long maxPushMaxCost = getMaxPushCost();
long avgPushCost = getAvgPushCost();
Loggers.PERFORMANCE_LOG.info("PERFORMANCE:" + "|" + domCount + "|" + ipCount + "|" + maxPushMaxCost + "|" + avgPushCost);
Loggers.PERFORMANCE_LOG.info("[PERFORMANCE] " + "|" + domCount + "|" + ipCount + "|" + maxPushMaxCost + "|" + avgPushCost);
} catch (Exception e) {
Loggers.SRV_LOG.warn("PERFORMANCE", "Exception while print performance log.", e);
Loggers.SRV_LOG.warn("[PERFORMANCE] Exception while print performance log.", e);
}
}

View File

@ -105,13 +105,13 @@ public class PushService {
try {
removeClientIfZombie();
} catch (Throwable e) {
Loggers.PUSH.warn("VIPSRV-PUSH", "failed to remove client zombied");
Loggers.PUSH.warn("[NACOS-PUSH] failed to remove client zombie");
}
}
}, 0, 20, TimeUnit.SECONDS);
} catch (SocketException e) {
Loggers.SRV_LOG.error("VIPSRV-PUSH", "failed to init push service");
Loggers.SRV_LOG.error("[NACOS-PUSH] failed to init push service");
}
}
@ -155,9 +155,9 @@ public class PushService {
} else {
PushClient res = clients.putIfAbsent(client.toString(), client);
if (res != null) {
Loggers.PUSH.warn("client:" + res.getAddrStr() + " already associated with key " + res.toString());
Loggers.PUSH.warn("client: {} already associated with key {}", res.getAddrStr(), res.toString());
}
Loggers.PUSH.debug("client: " + client.getAddrStr() + " added for dom: " + client.getDom());
Loggers.PUSH.debug("client: {} added for dom: {}", client.getAddrStr(), client.getDom());
}
}
@ -176,7 +176,7 @@ public class PushService {
size += clientConcurrentMap.size();
}
Loggers.PUSH.info("VIPSRV-PUSH", "clientMap size: " + size);
Loggers.PUSH.info("[NACOS-PUSH] clientMap size: {}", size);
}
@ -197,8 +197,8 @@ public class PushService {
return ackEntry;
} catch (Exception e) {
Loggers.PUSH.error("VIPSRV-PUSH", "failed to prepare data: [" + data + "] to client: [" +
client.getSocketAddr() + "]", e);
Loggers.PUSH.error("[NACOS-PUSH] failed to prepare data: {} to client: {}, error: {}",
data, client.getSocketAddr(), e);
}
return null;
@ -233,7 +233,7 @@ public class PushService {
}
Receiver.AckEntry ackEntry;
Loggers.PUSH.debug("push dom: " + dom + " to cleint: " + client.toString());
Loggers.PUSH.debug("push dom: {} to client: {}", dom, client.toString());
String key = getPushCacheKey(dom, client.getIp(), client.getAgent());
byte[] compressData = null;
Map<String, Object> data = null;
@ -242,7 +242,7 @@ public class PushService {
compressData = (byte[]) (pair.getValue0());
data = (Map<String, Object>) pair.getValue1();
Loggers.PUSH.debug("PUSH-CACHE", "cache hit: " + dom + ":" + client.getAddrStr());
Loggers.PUSH.debug("[PUSH-CACHE] cache hit: {}:{}", dom, client.getAddrStr());
}
if (compressData != null) {
@ -254,14 +254,13 @@ public class PushService {
}
}
Loggers.PUSH.info("dom: " + client.getDom() + " changed, schedule push for: "
+ client.getAddrStr() + ", agent: " + client.getAgent() + ", key: "
+ (ackEntry == null ? null : ackEntry.key));
Loggers.PUSH.info("dom: {} changed, schedule push for: {}, agent: {}, key: {}",
client.getDom(), client.getAddrStr(), client.getAgent(), (ackEntry == null ? null : ackEntry.key));
udpPush(ackEntry);
}
} catch (Exception e) {
Loggers.PUSH.error("VIPSRV-PUSH", "failed to push dom: " + dom + " to cleint", e);
Loggers.PUSH.error("[NACOS-PUSH] failed to push dom: {} to client, error: {}", dom, e);
} finally {
futureMap.remove(UtilsAndCommons.assembleFullServiceName(namespaceId, dom));
@ -467,7 +466,7 @@ public class PushService {
private static Receiver.AckEntry prepareAckEntry(PushClient client, Map<String, Object> data, long lastRefTime) {
if (MapUtils.isEmpty(data)) {
Loggers.PUSH.error("VIPSRV-PUSH", "pushing empty data for client is not allowed: " + client);
Loggers.PUSH.error("[NACOS-PUSH] pushing empty data for client is not allowed: {}", client);
return null;
}
@ -493,20 +492,20 @@ public class PushService {
return ackEntry;
} catch (Exception e) {
Loggers.PUSH.error("VIPSRV-PUSH", "failed to prepare data: [" + data + "] to client: [" +
client.getSocketAddr() + "]", e);
Loggers.PUSH.error("[NACOS-PUSH] failed to prepare data: {} to client: {}, error: {}",
data, client.getSocketAddr(), e);
return null;
}
}
private static Receiver.AckEntry udpPush(Receiver.AckEntry ackEntry) {
if (ackEntry == null) {
Loggers.PUSH.error("VIPSRV-PUSH", "ackEntry is null ");
Loggers.PUSH.error("[NACOS-PUSH] ackEntry is null.");
return null;
}
if (ackEntry.getRetryTimes() > MAX_RETRY_TIMES) {
Loggers.PUSH.warn("max re-push times reached, retry times " + ackEntry.retryTimes + ", key: " + ackEntry.key);
Loggers.PUSH.warn("max re-push times reached, retry times {}, key: {}", ackEntry.retryTimes, ackEntry.key);
ackMap.remove(ackEntry.key);
failedPush += 1;
return ackEntry;
@ -529,8 +528,8 @@ public class PushService {
return ackEntry;
} catch (Exception e) {
Loggers.PUSH.error("VIPSRV-PUSH", "failed to push data: [" + ackEntry.data + "] to client: [" +
ackEntry.origin.getAddress().getHostAddress() + "]", e);
Loggers.PUSH.error("[NACOS-PUSH] failed to push data: {} to client: {}, error: {}",
ackEntry.data, ackEntry.origin.getAddress().getHostAddress(), e);
ackMap.remove(ackEntry.key);
failedPush += 1;
@ -576,8 +575,7 @@ public class PushService {
int port = socketAddress.getPort();
if (System.nanoTime() - ackPacket.lastRefTime > ACK_TIMEOUT_NANOS) {
Loggers.PUSH.warn("ack takes too long from" + packet.getSocketAddress()
+ " ack json: " + json);
Loggers.PUSH.warn("ack takes too long from {} ack json: {}",packet.getSocketAddress(), json);
}
String ackKey = getACKKey(ip, port, ackPacket.lastRefTime);
@ -589,16 +587,15 @@ public class PushService {
long pushCost = System.currentTimeMillis() - udpSendTimeMap.get(ackKey);
Loggers.PUSH.info("received ack: " + json + " from: " + ip
+ ":" + port + ", cost: " + pushCost + "ms" + ", unacked: " + ackMap.size() +
",total push: " + totalPush);
Loggers.PUSH.info("received ack: {} from: {}:, cost: {} ms, unacked: {}, total push: {}",
json, ip, port, pushCost, ackMap.size(), totalPush);
pushCostMap.put(ackKey, pushCost);
udpSendTimeMap.remove(ackKey);
} catch (Throwable e) {
Loggers.PUSH.error("VIPSRV-PUSH", "error while receiving ack data", e);
Loggers.PUSH.error("[NACOS-PUSH] error while receiving ack data", e);
}
}
}

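One pitfall when converting concatenation to placeholders, visible in this file's ack logging: SLF4J silently ignores surplus arguments (unless the last one is a throwable) and prints unmatched {} literally, so placeholder and argument counts must line up. A short demonstration, logger name illustrative:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ArityDemo {
    private static final Logger PUSH = LoggerFactory.getLogger("naming-push");

    public static void main(String[] args) {
        // Two placeholders, three arguments: "9000" is silently dropped.
        PUSH.info("received ack from {}, cost: {} ms", "10.0.0.1", 12L, 9000);

        // Two placeholders, one argument: the second "{}" prints literally.
        PUSH.info("received ack from {}, cost: {} ms", "10.0.0.1");
    }
}
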
View File

@ -140,7 +140,7 @@ public class PeerSet {
if (!Objects.equals(leader, peer)) {
leader = peer;
Loggers.RAFT.info(leader.ip + " has become the LEADER");
Loggers.RAFT.info("{} has become the LEADER", leader.ip);
}
}
@ -150,7 +150,8 @@ public class PeerSet {
public RaftPeer makeLeader(RaftPeer candidate) {
if (!Objects.equals(leader, candidate)) {
leader = candidate;
Loggers.RAFT.info(leader.ip + " has become the LEADER" + ",local :" + JSON.toJSONString(local()) + ", leader: " + JSON.toJSONString(leader));
Loggers.RAFT.info("{} has become the LEADER, local: {}, leader: {}",
leader.ip, JSON.toJSONString(local()), JSON.toJSONString(leader));
}
for (final RaftPeer peer : peers.values()) {
@ -162,7 +163,8 @@ public class PeerSet {
@Override
public Integer onCompleted(Response response) throws Exception {
if (response.getStatusCode() != HttpURLConnection.HTTP_OK) {
Loggers.RAFT.error("VIPSRV-RAFT", "get peer failed: " + response.getResponseBody() + ", peer: " + peer.ip);
Loggers.RAFT.error("[NACOS-RAFT] get peer failed: {}, peer: {}",
response.getResponseBody(), peer.ip);
peer.state = RaftPeer.State.FOLLOWER;
return 1;
}
@ -174,7 +176,7 @@ public class PeerSet {
});
} catch (Exception e) {
peer.state = RaftPeer.State.FOLLOWER;
Loggers.RAFT.error("VIPSRV-RAFT", "error while getting peer from peer: " + peer.ip);
Loggers.RAFT.error("[NACOS-RAFT] error while getting peer from peer: {}", peer.ip);
}
}
}
@ -186,7 +188,7 @@ public class PeerSet {
RaftPeer peer = peers.get(NetUtils.localServer());
if (peer == null) {
throw new IllegalStateException("unable to find local peer: " + NetUtils.localServer() + ", all peers: "
+ Arrays.toString(peers.keySet().toArray()));
+ Arrays.toString(peers.keySet().toArray()));
}
return peer;

View File

@ -515,15 +515,13 @@ public class RaftCore {
@Override
public Integer onCompleted(Response response) throws Exception {
if (response.getStatusCode() != HttpURLConnection.HTTP_OK) {
Loggers.RAFT.error("NACOS-RAFT {}", "vote failed: "
, response.getResponseBody() + " url:" + url);
Loggers.RAFT.error("NACOS-RAFT vote failed: {}, url: {}", response.getResponseBody(), url);
return 1;
}
RaftPeer peer = JSON.parseObject(response.getResponseBody(), RaftPeer.class);
Loggers.RAFT.info("received approve from peer: " + JSON.toJSONString(peer));
Loggers.RAFT.info("received approve from peer: {}", JSON.toJSONString(peer));
peers.decideLeader(peer);
@ -531,7 +529,7 @@ public class RaftCore {
}
});
} catch (Exception e) {
Loggers.RAFT.warn("error while sending vote to server:" + server);
Loggers.RAFT.warn("error while sending vote to server: {}", server);
}
}
}
@ -564,7 +562,7 @@ public class RaftCore {
local.voteFor = remote.ip;
local.term.set(remote.term.get());
Loggers.RAFT.info("vote " + remote.ip + " as leader, term:" + remote.term);
Loggers.RAFT.info("vote {} as leader, term: {}", remote.ip, remote.term);
return local;
}
@ -584,7 +582,7 @@ public class RaftCore {
sendBeat();
} catch (Exception e) {
Loggers.RAFT.warn("RAFT", "error while sending beat", e);
Loggers.RAFT.warn("[RAFT] error while sending beat {}", e);
}
}
@ -595,7 +593,7 @@ public class RaftCore {
return;
}
Loggers.RAFT.info("[RAFT] send beat with " + datums.size() + " keys.");
Loggers.RAFT.info("[RAFT] send beat with {} keys.", datums.size());
local.resetLeaderDue();
@ -606,7 +604,7 @@ public class RaftCore {
JSONArray array = new JSONArray();
if (Switch.isSendBeatOnly()) {
Loggers.RAFT.info("[SEND-BEAT-ONLY] " + String.valueOf(Switch.isSendBeatOnly()));
Loggers.RAFT.info("[SEND-BEAT-ONLY] {}", String.valueOf(Switch.isSendBeatOnly()));
}
if (!Switch.isSendBeatOnly()) {
@ -650,7 +648,8 @@ public class RaftCore {
byte[] compressedBytes = out.toByteArray();
String compressedContent = new String(compressedBytes, "UTF-8");
Loggers.RAFT.info("raw beat data size: " + content.length() + ", size of compressed data: " + compressedContent.length());
Loggers.RAFT.info("raw beat data size: {}, size of compressed data: {}",
content.length(), compressedContent.length());
for (final String server : peers.allServersWithoutMySelf()) {
try {
@ -660,22 +659,23 @@ public class RaftCore {
@Override
public Integer onCompleted(Response response) throws Exception {
if (response.getStatusCode() != HttpURLConnection.HTTP_OK) {
Loggers.RAFT.error("NACOS-RAFT {}", "beat failed: " + response.getResponseBody() + ", peer: " + server);
Loggers.RAFT.error("NACOS-RAFT beat failed: {}, peer: {}",
response.getResponseBody(), server);
return 1;
}
peers.update(JSON.parseObject(response.getResponseBody(), RaftPeer.class));
Loggers.RAFT.info("receive beat response from: " + url);
Loggers.RAFT.info("receive beat response from: {}", url);
return 0;
}
@Override
public void onThrowable(Throwable t) {
Loggers.RAFT.error("NACOS-RAFT {} {}", "error while sending heart-beat to peer: " + server, t);
Loggers.RAFT.error("NACOS-RAFT error while sending heart-beat to peer: {} {}", server, t);
}
});
} catch (Exception e) {
Loggers.RAFT.error("VIPSRV {} {}", "error while sending heart-beat to peer: " + server, e);
Loggers.RAFT.error("VIPSRV error while sending heart-beat to peer: {} {}", server, e);
}
}
@ -692,20 +692,21 @@ public class RaftCore {
remote.voteFor = beat.getJSONObject("peer").getString("voteFor");
if (remote.state != RaftPeer.State.LEADER) {
Loggers.RAFT.info("[RAFT] invalid state from master, state=" + remote.state + ", remote peer: " + JSON.toJSONString(remote));
throw new IllegalArgumentException("invalid state from master, state=" + remote.state);
Loggers.RAFT.info("[RAFT] invalid state from master, state: {}, remote peer: {}",
remote.state, JSON.toJSONString(remote));
throw new IllegalArgumentException("invalid state from master, state: " + remote.state);
}
if (local.term.get() > remote.term.get()) {
Loggers.RAFT.info("[RAFT] out of date beat, beat-from-term: " + remote.term.get()
+ ", beat-to-term: " + local.term.get() + ", remote peer: " + JSON.toJSONString(remote) + ", and leaderDueMs: " + local.leaderDueMs);
Loggers.RAFT.info("[RAFT] out of date beat, beat-from-term: {}, beat-to-term: {}, remote peer: {}, and leaderDueMs: {}"
, remote.term.get(), local.term.get(), JSON.toJSONString(remote), local.leaderDueMs);
throw new IllegalArgumentException("out of date beat, beat-from-term: " + remote.term.get()
+ ", beat-to-term: " + local.term.get());
}
if (local.state != RaftPeer.State.FOLLOWER) {
Loggers.RAFT.info("[RAFT] make remote as leader " + ", remote peer: " + JSON.toJSONString(remote));
Loggers.RAFT.info("[RAFT] make remote as leader, remote peer: {}", JSON.toJSONString(remote));
// mk follower
local.state = RaftPeer.State.FOLLOWER;
local.voteFor = remote.ip;
@ -727,8 +728,8 @@ public class RaftCore {
List<String> batch = new ArrayList<String>();
if (!Switch.isSendBeatOnly()) {
int processedCount = 0;
Loggers.RAFT.info("[RAFT] received beat with " + beatDatums.size() + " keys, RaftCore.datums' size is "
+ RaftCore.datums.size() + ", remote server: " + remote.ip + ", term: " + remote.term + ", local term: " + local.term);
Loggers.RAFT.info("[RAFT] received beat with {} keys, RaftCore.datums' size is {}, remote server: {}, term: {}, local term: {}",
beatDatums.size(), RaftCore.datums.size(), remote.ip, remote.term, local.term);
for (Object object : beatDatums) {
processedCount = processedCount + 1;
@ -773,8 +774,8 @@ public class RaftCore {
continue;
}
Loggers.RAFT.info("get datums from leader: " + getLeader().ip + " , batch size is " + batch.size() + ", processedCount is " + processedCount
+ ", datums' size is " + beatDatums.size() + ", RaftCore.datums' size is " + RaftCore.datums.size());
Loggers.RAFT.info("get datums from leader: {}, batch size is {}, processedCount is {}, datums' size is {}, RaftCore.datums' size is {}"
, getLeader().ip, batch.size(), processedCount, beatDatums.size(), RaftCore.datums.size());
// update datum entry
String url = buildURL(remote.ip, API_GET) + "?keys=" + URLEncoder.encode(keys, "UTF-8");
@ -795,8 +796,8 @@ public class RaftCore {
Datum oldDatum = RaftCore.getDatum(datum.key);
if (oldDatum != null && datum.timestamp.get() <= oldDatum.timestamp.get()) {
Loggers.RAFT.info("[NACOS-RAFT] timestamp is smaller than that of mine, key: " + datum.key
+ ",remote: " + datum.timestamp + ", local: " + oldDatum.timestamp);
Loggers.RAFT.info("[NACOS-RAFT] timestamp is smaller than that of mine, key: {}, remote: {}, local: {}",
datum.key, datum.timestamp, oldDatum.timestamp);
continue;
}
@ -819,12 +820,12 @@ public class RaftCore {
RaftStore.updateTerm(local.term.get());
}
Loggers.RAFT.info("data updated" + ", key=" + datum.key
+ ", timestamp=" + datum.timestamp + ",from " + JSON.toJSONString(remote) + ", local term: " + local.term);
Loggers.RAFT.info("data updated, key: {}, timestamp: {}, from {}, local term: {}",
datum.key, datum.timestamp, JSON.toJSONString(remote), local.term);
notifier.addTask(datum, Notifier.ApplyAction.CHANGE);
} catch (Throwable e) {
Loggers.RAFT.error("RAFT-BEAT", "failed to sync datum from leader, key: " + datum.key, e);
Loggers.RAFT.error("[RAFT-BEAT] failed to sync datum from leader, key: {} {}", datum.key, e);
} finally {
OPERATE_LOCK.unlock();
}
@ -837,7 +838,7 @@ public class RaftCore {
batch.clear();
} catch (Exception e) {
Loggers.RAFT.error("NACOS-RAFT {}", "failed to handle beat entry, key=" + datumKey);
Loggers.RAFT.error("[NACOS-RAFT] failed to handle beat entry, key: {}", datumKey);
}
}
@ -853,7 +854,7 @@ public class RaftCore {
try {
deleteDatum(deadKey);
} catch (Exception e) {
Loggers.RAFT.error("NACOS-RAFT {}", "failed to remove entry, key=" + deadKey, e);
Loggers.RAFT.error("[NACOS-RAFT] failed to remove entry, key={} {}", deadKey, e);
}
}
@ -884,13 +885,13 @@ public class RaftCore {
List<String> newServers = (List<String>) CollectionUtils.subtract(servers, oldServers);
if (!CollectionUtils.isEmpty(newServers)) {
peers.add(newServers);
Loggers.RAFT.info("server list is updated, new (" + newServers.size() + ") servers: " + newServers);
Loggers.RAFT.info("server list is updated, new: {} servers: {}", newServers.size(), newServers);
}
List<String> deadServers = (List<String>) CollectionUtils.subtract(oldServers, servers);
if (!CollectionUtils.isEmpty(deadServers)) {
peers.remove(deadServers);
Loggers.RAFT.info("server list is updated, dead (" + deadServers.size() + ") servers: " + deadServers);
Loggers.RAFT.info("server list is updated, dead: {}, servers: {}", deadServers.size(), deadServers);
}
} catch (Exception e) {
Loggers.RAFT.info("error while updating server list.", e);
@ -907,18 +908,18 @@ public class RaftCore {
for (RaftListener listener1 : listeners) {
if (listener1 instanceof VirtualClusterDomain) {
Loggers.RAFT.debug("listener in listeners: " + ((VirtualClusterDomain) listener1).getName());
Loggers.RAFT.debug("listener in listeners: {}", ((VirtualClusterDomain) listener1).getName());
}
}
if (listeners.contains(listener)) {
if (listener instanceof VirtualClusterDomain) {
Loggers.RAFT.info("add listener: " + ((VirtualClusterDomain) listener).getName());
Loggers.RAFT.info("add listener: {}", ((VirtualClusterDomain) listener).getName());
} else {
Loggers.RAFT.info("add listener for switch or domain meta. ");
}
} else {
Loggers.RAFT.error("NACOS-RAFT {}", "faild to add listener: " + JSON.toJSONString(listener));
Loggers.RAFT.error("[NACOS-RAFT] faild to add listener: {}", JSON.toJSONString(listener));
}
// if data present, notify immediately
for (Datum datum : datums.values()) {
@ -929,7 +930,7 @@ public class RaftCore {
try {
listener.onChange(datum.key, datum.value);
} catch (Exception e) {
Loggers.RAFT.error("NACOS-RAFT {}", "failed to notify listener", e);
Loggers.RAFT.error("NACOS-RAFT failed to notify listener", e);
}
}
}
@ -1005,7 +1006,7 @@ public class RaftCore {
RaftStore.delete(deleted);
}
notifier.addTask(deleted, Notifier.ApplyAction.DELETE);
Loggers.RAFT.info("datum deleted, key=" + key);
Loggers.RAFT.info("datum deleted, key: {}", key);
}
}
@ -1068,17 +1069,15 @@ public class RaftCore {
continue;
}
} catch (Throwable e) {
Loggers.RAFT.error("NACOS-RAFT {}", "error while notifying listener of key: "
+ datum.key, e);
Loggers.RAFT.error("[NACOS-RAFT] error while notifying listener of key: {} {}", datum.key, e);
}
}
if (Loggers.RAFT.isDebugEnabled()) {
Loggers.RAFT.debug("NACOS-RAFT {}", "datum change notified" +
", key: " + datum.key + "; listener count: " + count);
Loggers.RAFT.debug("[NACOS-RAFT] datum change notified, key: {}, listener count: {}", datum.key, count);
}
} catch (Throwable e) {
Loggers.RAFT.error("NACOS-RAFT {} {}", "Error while handling notifying task", e);
Loggers.RAFT.error("[NACOS-RAFT] Error while handling notifying task", e);
}
}
}

View File

@ -78,7 +78,7 @@ public class RaftStore {
RaftCore.setTerm(NumberUtils.toLong(RaftStore.meta.getProperty("term"), 0L));
}
Loggers.RAFT.info("finish loading all datums, size: " + RaftCore.datumSize() + " cost " + (System.currentTimeMillis() - start) + "ms.");
Loggers.RAFT.info("finish loading all datums, size: {} cost {} ms.", RaftCore.datumSize(), (System.currentTimeMillis() - start));
}
public synchronized static void load(String key) throws Exception {
@ -86,7 +86,7 @@ public class RaftStore {
// load data
for (File cache : listCaches()) {
if (!cache.isFile()) {
Loggers.RAFT.warn("warning: encountered directory in cache dir: " + cache.getAbsolutePath());
Loggers.RAFT.warn("warning: encountered directory in cache dir: {}", cache.getAbsolutePath());
}
if (!StringUtils.equals(decodeFileName(cache.getName()), key)) {
@ -95,7 +95,8 @@ public class RaftStore {
readDatum(cache);
}
Loggers.RAFT.info("finish loading datum, key: " + key + " cost " + (System.currentTimeMillis() - start) + "ms.");
Loggers.RAFT.info("finish loading datum, key: {} cost {} ms.",
key, (System.currentTimeMillis() - start));
}
public synchronized static void readDatum(File file) throws IOException {
@ -115,7 +116,7 @@ public class RaftStore {
Datum datum = JSON.parseObject(json, Datum.class);
RaftCore.addDatum(datum);
} catch (Exception e) {
Loggers.RAFT.warn("waning: failed to deserialize key: " + file.getName());
Loggers.RAFT.warn("waning: failed to deserialize key: {}", file.getName());
throw e;
} finally {
if (fc != null) {
@ -187,7 +188,7 @@ public class RaftStore {
public static void delete(Datum datum) {
File cacheFile = new File(CACHE_DIR + File.separator + encodeFileName(datum.key));
if (!cacheFile.delete()) {
Loggers.RAFT.error("RAFT-DELETE", "failed to delete datum: " + datum.key + ", value: " + datum.value);
Loggers.RAFT.error("[RAFT-DELETE] failed to delete datum: {}, value: {}", datum.key, datum.value);
throw new IllegalStateException("failed to delete datum: " + datum.key);
}
}

View File

@ -256,7 +256,7 @@ public class ApiCommands {
result.put("ips", ipArray);
} catch (Throwable e) {
Loggers.SRV_LOG.warn("VIPSRV-IP4DOM", "failed to call ip4Dom, caused " + e.getMessage());
Loggers.SRV_LOG.warn("[NACOS-IP4DOM] failed to call ip4Dom, caused ", e);
throw new IllegalArgumentException(e);
}
@ -297,7 +297,9 @@ public class ApiCommands {
clusterName = UtilsAndCommons.DEFAULT_CLUSTER_NAME;
}
Loggers.DEBUG_LOG.debug("[CLIENT-BEAT] full arguments: beat: " + clientBeat + ", serviceName:" + dom);
if (Loggers.DEBUG_LOG.isDebugEnabled()) {
Loggers.DEBUG_LOG.debug("[CLIENT-BEAT] full arguments: beat: {}, serviceName: {}", clientBeat, dom);
}
VirtualClusterDomain virtualClusterDomain = (VirtualClusterDomain) domainsManager.getDomain(namespaceId, dom);
Map<String, String[]> stringMap = new HashMap<>(16);
@ -310,7 +312,7 @@ public class ApiCommands {
//if domain does not exist, register it.
if (virtualClusterDomain == null) {
regDom(OverrideParameterRequestWrapper.buildRequest(request, stringMap));
Loggers.SRV_LOG.warn("dom not found, register it, dom:" + dom);
Loggers.SRV_LOG.warn("dom not found, register it, dom: {}", dom);
}
virtualClusterDomain = (VirtualClusterDomain) domainsManager.getDomain(namespaceId, dom);
@ -336,12 +338,12 @@ public class ApiCommands {
stringMap.put("json", Arrays.asList("true").toArray(new String[1]));
stringMap.put("dom", Arrays.asList(dom).toArray(new String[1]));
addIP4Dom(OverrideParameterRequestWrapper.buildRequest(request, stringMap));
Loggers.SRV_LOG.warn("ip not found, register it, dom:" + dom + ", ip:" + ipAddress);
Loggers.SRV_LOG.warn("ip not found, register it, dom: {}, ip: {}", dom, ipAddress);
}
if (!DistroMapper.responsible(dom)) {
String server = DistroMapper.mapSrv(dom);
Loggers.EVT_LOG.info("I'm not responsible for " + dom + ", proxy it to " + server);
Loggers.EVT_LOG.info("I'm not responsible for {}, proxy it to {}", dom, server);
Map<String, String> proxyParams = new HashMap<>(16);
for (Map.Entry<String, String[]> entry : request.getParameterMap().entrySet()) {
String key = entry.getKey();
@ -399,7 +401,7 @@ public class ApiCommands {
String serviceMetadataJson = WebUtils.optional(request, "serviceMetadata", StringUtils.EMPTY);
String clusterMetadataJson = WebUtils.optional(request, "clusterMetadata", StringUtils.EMPTY);
Loggers.SRV_LOG.info("[RESET-WEIGHT] " + String.valueOf(resetWeight));
Loggers.SRV_LOG.info("[RESET-WEIGHT] {}", String.valueOf(resetWeight));
VirtualClusterDomain domObj = new VirtualClusterDomain();
domObj.setName(dom);
@ -569,7 +571,7 @@ public class ApiCommands {
try {
regDom(request);
} catch (Exception e) {
Loggers.SRV_LOG.error("REG-SERIVCE", "register service failed, service:" + dom, e);
Loggers.SRV_LOG.error("[REG-SERIVCE] register service failed, service:" + dom, e);
}
}
});
@ -590,7 +592,7 @@ public class ApiCommands {
}
if (Loggers.SRV_LOG.isDebugEnabled()) {
Loggers.SRV_LOG.debug("reg-service {}", "add ip: " + dom + "|" + ipAddress.toJSON());
Loggers.SRV_LOG.debug("reg-service add ip: {}|{}", dom, ipAddress.toJSON());
}
Map<String, String[]> stringMap = new HashMap<>(16);
@ -838,15 +840,15 @@ public class ApiCommands {
long term = Long.parseLong(WebUtils.required(request, "term"));
if (!RaftCore.isLeader(clientIP)) {
Loggers.RAFT.warn("peer(" + JSON.toJSONString(clientIP) + ") tried to publish " +
"data but wasn't leader, leader: " + JSON.toJSONString(RaftCore.getLeader()));
Loggers.RAFT.warn("peer {} tried to publish data but wasn't leader, leader: {}",
JSON.toJSONString(clientIP), JSON.toJSONString(RaftCore.getLeader()));
throw new IllegalStateException("peer(" + clientIP + ") tried to publish " +
"data but wasn't leader");
}
if (term < RaftCore.getPeerSet().local().term.get()) {
Loggers.RAFT.warn("out of date publish, pub-term: "
+ JSON.toJSONString(clientIP) + ", cur-term: " + JSON.toJSONString(RaftCore.getPeerSet().local()));
Loggers.RAFT.warn("out of date publish, pub-term: {}, cur-term: {}",
JSON.toJSONString(clientIP), JSON.toJSONString(RaftCore.getPeerSet().local()));
throw new IllegalStateException("out of date publish, pub-term:"
+ term + ", cur-term: " + RaftCore.getPeerSet().local().term.get());
}
@ -912,7 +914,9 @@ public class ApiCommands {
proxyParams.put(entry.getKey(), entry.getValue()[0]);
}
Loggers.DEBUG_LOG.debug("[ADD-IP] full arguments:" + proxyParams);
if (Loggers.DEBUG_LOG.isDebugEnabled()) {
Loggers.DEBUG_LOG.debug("[ADD-IP] full arguments: {}", proxyParams);
}
String ipListString = WebUtils.required(request, "ipList");
final List<String> ipList;
@ -948,7 +952,7 @@ public class ApiCommands {
HttpClient.HttpResult result1 = HttpClient.httpPost(url, null, proxyParams);
if (result1.code != HttpURLConnection.HTTP_OK) {
Loggers.SRV_LOG.warn("failed to add ip for dom, caused " + result1.content);
Loggers.SRV_LOG.warn("failed to add ip for dom, caused {}", result1.content);
throw new IllegalArgumentException("failed to add ip for dom, caused " + result1.content);
}
@ -1033,15 +1037,15 @@ public class ApiCommands {
}
});
} catch (Exception e) {
Loggers.SRV_LOG.error("ADD-IP", "failed when publish to peer." + url, e);
Loggers.SRV_LOG.error("[ADD-IP] failed when publish to peer. " + url, e);
}
}
});
}
Loggers.EVT_LOG.info("{" + dom + "} {POS} {IP-ADD}" + " new: "
+ Arrays.toString(ipList.toArray()) + " operatorIP: "
+ WebUtils.optional(request, "clientIP", "unknown"));
Loggers.EVT_LOG.info("dom: {} {POS} {IP-ADD} new: {} operatorIP: {}",
dom, Arrays.toString(ipList.toArray()), WebUtils.optional(request, "clientIP", "unknown"));
} finally {
domainsManager.getDom2LockMap().get(UtilsAndCommons.assembleFullServiceName(namespaceId, dom)).unlock();
}
@ -1083,7 +1087,7 @@ public class ApiCommands {
cacheMillis = Switch.getPushCacheMillis(dom);
}
} catch (Exception e) {
Loggers.SRV_LOG.error("VIPSRV-API", "failed to added push client", e);
Loggers.SRV_LOG.error("[NACOS-API] failed to added push client", e);
cacheMillis = Switch.getCacheMillis(dom);
}
@ -1118,8 +1122,7 @@ public class ApiCommands {
if ((float) ipMap.get(Boolean.TRUE).size() / srvedIPs.size() <= threshold) {
Loggers.SRV_LOG.warn("protect threshold reached, return all ips, " +
"dom: " + dom);
Loggers.SRV_LOG.warn("protect threshold reached, return all ips, dom: {}", dom);
if (isCheck) {
result.put("reachProtectThreshold", true);
}
@ -1241,9 +1244,8 @@ public class ApiCommands {
domainsManager.easyRemvIP4Dom(namespaceId, dom, ipObjList);
Loggers.EVT_LOG.info("{" + dom + "} {POS} {IP-REMV}" + " dead: "
+ Arrays.toString(ipList.toArray()) + " operator: "
+ WebUtils.optional(request, "clientIP", "unknown"));
Loggers.EVT_LOG.info("dom: {} {POS} {IP-REMV} dead: {}operator: {}",
dom, Arrays.toString(ipList.toArray()), WebUtils.optional(request, "clientIP", "unknown"));
return "ok";
}
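SLF4J substitutes only the exact token {}; decorated tokens such as {POS} and {IP-REMV} are not treated as placeholders and pass through to the output verbatim, which is why the event-log formats above keep them as literal markers. A small illustration with made-up values:
// Logs: dom: nacos.demo {POS} {IP-REMV} dead: [10.0.0.1:8080] operator: unknown
Loggers.EVT_LOG.info("dom: {} {POS} {IP-REMV} dead: {} operator: {}",
    "nacos.demo", "[10.0.0.1:8080]", "unknown");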
@ -1316,7 +1318,7 @@ public class ApiCommands {
if (datum != null) {
switchDomain = JSON.parseObject(datum.value, SwitchDomain.class);
} else {
Loggers.SRV_LOG.warn("datum: " + UtilsAndCommons.DOMAINS_DATA_ID + ".00-00---000-VIPSRV_SWITCH_DOMAIN-000---00-00 is null");
Loggers.SRV_LOG.warn("datum: {}.00-00---000-VIPSRV_SWITCH_DOMAIN-000---00-00 is null", UtilsAndCommons.DOMAINS_DATA_ID);
}
if (SwitchEntry.BATCH.equals(entry)) {
@ -1824,7 +1826,7 @@ public class ApiCommands {
cluster = getClusterFromJson(json);
} catch (Exception e) {
Loggers.SRV_LOG.warn("ADD-CLUSTER", "failed to parse json, try old format.");
Loggers.SRV_LOG.warn("[ADD-CLUSTER] failed to parse json, try old format.");
}
} else {
String cktype = WebUtils.optional(request, "cktype", "TCP");
@ -1968,7 +1970,7 @@ public class ApiCommands {
sb.append(ip).append("\r\n");
}
Loggers.SRV_LOG.info("[UPDATE-CLUSTER] new ips:" + sb.toString());
Loggers.SRV_LOG.info("[UPDATE-CLUSTER] new ips: {}", sb.toString());
writeClusterConf(sb.toString());
return result;
}
@ -1979,7 +1981,7 @@ public class ApiCommands {
for (String ip : ips.split(ipSpliter)) {
sb.append(ip).append("\r\n");
}
Loggers.SRV_LOG.info("[UPDATE-CLUSTER] new ips:" + sb.toString());
Loggers.SRV_LOG.info("[UPDATE-CLUSTER] new ips: {}", sb.toString());
writeClusterConf(sb.toString());
return result;
}
@ -2112,7 +2114,7 @@ public class ApiCommands {
try {
DomainsManager.DomainChecksum checksums = JSON.parseObject(domsStatusString, DomainsManager.DomainChecksum.class);
if (checksums == null) {
Loggers.SRV_LOG.warn("DOMAIN-STATUS", "receive malformed data: " + null);
Loggers.SRV_LOG.warn("[DOMAIN-STATUS] receive malformed data: null");
return "fail";
}
@ -2131,12 +2133,15 @@ public class ApiCommands {
domain.recalculateChecksum();
if (!checksum.equals(domain.getChecksum())) {
Loggers.SRV_LOG.debug("checksum of " + dom + " is not consistent, remote: " + serverIP + ",checksum: " + checksum + ", local: " + domain.getChecksum());
if (Loggers.SRV_LOG.isDebugEnabled()) {
Loggers.SRV_LOG.debug("checksum of {} is not consistent, remote: {}, checksum: {}, local: {}",
dom, serverIP, checksum, domain.getChecksum());
}
domainsManager.addUpdatedDom2Queue(checksums.namespaceId, dom, serverIP, checksum);
}
}
} catch (Exception e) {
Loggers.SRV_LOG.warn("DOMAIN-STATUS", "receive malformed data: " + domsStatusString, e);
Loggers.SRV_LOG.warn("[DOMAIN-STATUS] receive malformed data: " + domsStatusString, e);
}
return "ok";
@ -2151,8 +2156,8 @@ public class ApiCommands {
String port = WebUtils.required(request, "port");
String state = WebUtils.optional(request, "state", StringUtils.EMPTY);
Loggers.SRV_LOG.info("[CONTAINER_NOTFY] received notify event, type:" + type + ", domain:" + domain +
", ip:" + ip + ", port:" + port + ", state:" + state);
Loggers.SRV_LOG.info("[CONTAINER_NOTFY] received notify event, type: {}, domain: {}, ip: {}, port: {}, state: {}",
type, domain, ip, port, state);
return "ok";
}

View File

@ -66,7 +66,7 @@ public class DistroFilter implements Filter {
try {
resp.sendRedirect(url);
} catch (Exception ignore) {
Loggers.SRV_LOG.warn("DISTRO-FILTER", "request failed: " + url);
Loggers.SRV_LOG.warn("[DISTRO-FILTER] request failed: " + url);
}
return;
}
@ -102,7 +102,7 @@ public class DistroFilter implements Filter {
try {
resp.sendRedirect(url);
} catch (Exception ignore) {
Loggers.SRV_LOG.warn("DISTRO-FILTER", "request failed: " + url);
Loggers.SRV_LOG.warn("[DISTRO-FILTER] request failed: " + url);
}
}
}

View File

@ -1,50 +0,0 @@
/*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.nacos.naming.web;
import com.alibaba.nacos.api.common.Constants;
import com.alibaba.nacos.core.utils.WebUtils;
import com.alibaba.nacos.naming.misc.UtilsAndCommons;
import org.apache.commons.lang3.StringUtils;
import javax.servlet.*;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
/**
* A filter to intercept tenant parameter.
*
* @author <a href="mailto:zpf.073@gmail.com">nkorange</a>
* @since 0.8.0
*/
public class NamespaceFilter implements Filter {
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
chain.doFilter(request, response);
}
@Override
public void destroy() {
}
}

View File

@ -51,18 +51,6 @@ public class NamingConfig {
return registration;
}
@Bean
public FilterRegistrationBean tenantFilterRegistration() {
FilterRegistrationBean registration = new FilterRegistrationBean();
registration.setFilter(namespaceFilter());
registration.addUrlPatterns("/v1/ns/instance/*", "/v1/ns/service/*", "/v1/ns/cluster/*", "/v1/ns/health/*");
registration.setName("namespaceFilter");
registration.setOrder(4);
return registration;
}
@Bean
public Filter distroFilter() {
return new DistroFilter();
@ -73,8 +61,4 @@ public class NamingConfig {
return new AuthFilter();
}
@Bean
public Filter namespaceFilter() {
return new NamespaceFilter();
}
}