#603 Other files formatted

nkorange 2019-01-10 18:53:18 +08:00
parent 7307108ca7
commit 791b7c3739
30 changed files with 298 additions and 369 deletions
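
The recurring change in this commit is mechanical: string-concatenated log messages become SLF4J-style parameterized calls, bracketed tags like [SERVER-INIT] move into the format string, and debug logs with expensive arguments gain an isDebugEnabled() guard. A minimal sketch of the pattern, assuming a plain SLF4J logger (the class, logger name, and values here are hypothetical, not code from this commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogPatternSketch {

        private static final Logger SRV_LOG = LoggerFactory.getLogger("naming-server");

        public static void main(String[] args) {
            String dom = "nacos.test.1";
            int port = 8848;

            // Before: the message string is built even when INFO is disabled.
            SRV_LOG.info("[SERVER-INIT] got port:" + port + ", dom: " + dom);

            // After: placeholders defer formatting until the level check passes.
            SRV_LOG.info("[SERVER-INIT] got port: {}, dom: {}", port, dom);

            // Placeholders do not help when the argument itself is costly to
            // compute (e.g. JSON serialization), so those call sites also get
            // an explicit level guard.
            if (SRV_LOG.isDebugEnabled()) {
                SRV_LOG.debug("[HEALTH-SYNC] results: {}", expensiveToString());
            }
        }

        // Stand-in for something like JSON.toJSONString(list).
        private static String expensiveToString() {
            return "{...}";
        }
    }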

View File

@@ -42,8 +42,8 @@ public class RunningConfig implements ApplicationListener<WebServerInitializedEv
     @Override
     public void onApplicationEvent(WebServerInitializedEvent event) {
-        Loggers.SRV_LOG.info("[SERVER-INIT] got port:" + event.getWebServer().getPort());
-        Loggers.SRV_LOG.info("[SERVER-INIT] got path:" + servletContext.getContextPath());
+        Loggers.SRV_LOG.info("[SERVER-INIT] got port: {}", event.getWebServer().getPort());
+        Loggers.SRV_LOG.info("[SERVER-INIT] got path: {}", servletContext.getContextPath());
         serverPort = event.getWebServer().getPort();
         contextPath = servletContext.getContextPath();
@@ -51,7 +51,7 @@ public class RunningConfig implements ApplicationListener<WebServerInitializedEv
         try {
             RaftCore.init();
         } catch (Exception e) {
-            Loggers.RAFT.error("VIPSRV-RAFT", "failed to initialize raft sub system", e);
+            Loggers.RAFT.error("[NACOS-RAFT] {} {}", "failed to initialize raft sub system", e);
         }
     }

View File

@@ -65,11 +65,9 @@ public class ClusterController {
         Cluster cluster = domain.getClusterMap().get(clusterName);
         if (cluster == null) {
-            Loggers.SRV_LOG.warn("UPDATE-CLUSTER", "cluster not exist, will create it: " + clusterName + ", service:" + serviceName);
+            Loggers.SRV_LOG.warn("[UPDATE-CLUSTER] cluster not exist, will create it: {}, service: {}", clusterName, serviceName);
             cluster = new Cluster();
             cluster.setName(clusterName);
-            // throw new NacosException(NacosException.INVALID_PARAM, "cluster not found:"+ clusterName + ", " + serviceName);
         }
         cluster.setDefCkport(NumberUtils.toInt(checkPort));

View File

@@ -187,24 +187,22 @@ public class Cluster extends com.alibaba.nacos.api.naming.pojo.Cluster implement
             } else {
                 if (ip.isValid() != oldIP.isValid()) {
                     // ip validation status updated
-                    Loggers.EVT_LOG.info("{" + getDom().getName() + "} {SYNC} " +
-                        "{IP-" + (ip.isValid() ? "ENABLED" : "DISABLED") + "} " + ip.getIp()
-                        + ":" + ip.getPort() + "@" + getName());
+                    Loggers.EVT_LOG.info("{} {SYNC} IP-{} {}:{}@{}",
+                        getDom().getName(), (ip.isValid() ? "ENABLED" : "DISABLED"), ip.getIp(), ip.getPort(), getName());
                 }
             }
             if (ip.getWeight() != oldIP.getWeight()) {
                 // ip validation status updated
-                Loggers.EVT_LOG.info("{" + getDom().getName() + "} {SYNC} " +
-                    "{IP-UPDATED} " + oldIP.toString() + "->" + ip.toString());
+                Loggers.EVT_LOG.info("{} {SYNC} {IP-UPDATED} {}->{}", getDom().getName(), oldIP.toString(), ip.toString());
             }
         }
     }
     List<IpAddress> newIPs = subtract(ips, oldIPMap.values());
     if (newIPs.size() > 0) {
-        Loggers.EVT_LOG.info("{" + getDom().getName() + "} {SYNC} {IP-NEW} cluster: " + getName()
-            + ", new ips(" + newIPs.size() + "): " + newIPs.toString());
+        Loggers.EVT_LOG.info("{} {SYNC} {IP-NEW} cluster: {}, new ips size: {}, content: {}",
+            getDom().getName(), getName(), newIPs.size(), newIPs.toString());
         for (IpAddress ip : newIPs) {
             HealthCheckStatus.reset(ip);
@@ -214,8 +212,8 @@ public class Cluster extends com.alibaba.nacos.api.naming.pojo.Cluster implement
     List<IpAddress> deadIPs = subtract(oldIPMap.values(), ips);
     if (deadIPs.size() > 0) {
-        Loggers.EVT_LOG.info("{" + getDom().getName() + "} {SYNC} {IP-DEAD} cluster: " + getName()
-            + ", dead ips(" + deadIPs.size() + "): " + deadIPs.toString());
+        Loggers.EVT_LOG.info("{} {SYNC} {IP-DEAD} cluster: {}, dead ips size: {}, content: {}",
+            getDom().getName(), getName(), deadIPs.size(), deadIPs.toString());
         for (IpAddress ip : deadIPs) {
             HealthCheckStatus.remv(ip);
@@ -333,32 +331,38 @@ public class Cluster extends com.alibaba.nacos.api.naming.pojo.Cluster implement
     public void update(Cluster cluster) {
         if (!healthChecker.equals(cluster.getHealthChecker())) {
-            Loggers.SRV_LOG.info("[CLUSTER-UPDATE] " + cluster.getDom().getName() + ":" + cluster.getName() + ", healthChecker: " + healthChecker.toString() + " -> " + cluster.getHealthChecker().toString());
+            Loggers.SRV_LOG.info("[CLUSTER-UPDATE] {}:{}:, healthChecker: {} -> {}",
+                cluster.getDom().getName(), cluster.getName(), healthChecker.toString(), cluster.getHealthChecker().toString());
             healthChecker = cluster.getHealthChecker();
         }
         if (defCkport != cluster.getDefCkport()) {
-            Loggers.SRV_LOG.info("[CLUSTER-UPDATE] " + cluster.getDom().getName() + ":" + cluster.getName() + ", defCkport: " + defCkport + " -> " + cluster.getDefCkport());
+            Loggers.SRV_LOG.info("[CLUSTER-UPDATE] {}:{}, defCkport: {} -> {}",
+                cluster.getDom().getName(), cluster.getName(), defCkport, cluster.getDefCkport());
             defCkport = cluster.getDefCkport();
         }
         if (defIPPort != cluster.getDefIPPort()) {
-            Loggers.SRV_LOG.info("[CLUSTER-UPDATE] " + cluster.getDom().getName() + ":" + cluster.getName() + ", defIPPort: " + defIPPort + " -> " + cluster.getDefIPPort());
+            Loggers.SRV_LOG.info("[CLUSTER-UPDATE] {}:{}, defIPPort: {} -> {}",
+                cluster.getDom().getName(), cluster.getName(), defIPPort, cluster.getDefIPPort());
             defIPPort = cluster.getDefIPPort();
         }
         if (!StringUtils.equals(submask, cluster.getSubmask())) {
-            Loggers.SRV_LOG.info("[CLUSTER-UPDATE] " + cluster.getDom().getName() + ":" + cluster.getName() + ", submask: " + submask + " -> " + cluster.getSubmask());
+            Loggers.SRV_LOG.info("[CLUSTER-UPDATE] {}:{}, submask: {} -> {}",
+                cluster.getDom().getName(), cluster.getName(), submask, cluster.getSubmask());
             submask = cluster.getSubmask();
         }
         if (!StringUtils.equals(sitegroup, cluster.getSitegroup())) {
-            Loggers.SRV_LOG.info("[CLUSTER-UPDATE] " + cluster.getDom().getName() + ":" + cluster.getName() + ", sitegroup: " + sitegroup + " -> " + cluster.getSitegroup());
+            Loggers.SRV_LOG.info("[CLUSTER-UPDATE] {}:{}, sitegroup: {} -> {}",
+                cluster.getDom().getName(), cluster.getName(), sitegroup, cluster.getSitegroup());
             sitegroup = cluster.getSitegroup();
         }
         if (isUseIPPort4Check() != cluster.isUseIPPort4Check()) {
-            Loggers.SRV_LOG.info("[CLUSTER-UPDATE] " + cluster.getDom().getName() + ":" + cluster.getName() + ", useIPPort4Check: " + isUseIPPort4Check() + " -> " + cluster.isUseIPPort4Check());
+            Loggers.SRV_LOG.info("[CLUSTER-UPDATE] {}:{}, useIPPort4Check: {} -> {}",
+                cluster.getDom().getName(), cluster.getName(), isUseIPPort4Check(), cluster.isUseIPPort4Check());
             setUseIPPort4Check(cluster.isUseIPPort4Check());
         }
@@ -387,9 +391,9 @@ public class Cluster extends com.alibaba.nacos.api.naming.pojo.Cluster implement
     public boolean responsible(IpAddress ip) {
         return Switch.isHealthCheckEnabled(dom.getName())
             && !getHealthCheckTask().isCancelled()
             && DistroMapper.responsible(getDom().getName())
             && ipContains.containsKey(ip.toIPAddr());
     }
     public void valid() {

View File

@@ -21,8 +21,6 @@ import com.alibaba.nacos.naming.misc.*;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.apache.commons.collections.CollectionUtils;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
 import java.text.SimpleDateFormat;
 import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
@@ -61,7 +59,7 @@ public class DistroMapper {
         init();
         UtilsAndCommons.SERVER_STATUS_EXECUTOR.schedule(new ServerStatusReporter(),
             60000, TimeUnit.MILLISECONDS);
     }
     /**
@@ -110,7 +108,7 @@ public class DistroMapper {
                 // site:ip:lastReportTime:weight
                 String[] params = config.split("#");
                 if (params.length <= 3) {
-                    Loggers.SRV_LOG.warn("received malformed distro map data: " + config);
+                    Loggers.SRV_LOG.warn("received malformed distro map data: {}", config);
                     continue;
                 }
@@ -159,10 +157,10 @@ public class DistroMapper {
             float curRatio = (float) newHealthyList.size() / allSiteSrvs.size();
             if (AUTO_DISABLED_HEALTH_CHECK
                 && curRatio > Switch.getDistroThreshold()
                 && System.currentTimeMillis() - LAST_HEALTH_SERVER_MILLIS > STABLE_PERIOD) {
                 Loggers.SRV_LOG.info("[VIPSRV-DISTRO] distro threshold restored and " +
-                    "stable now, enable health check. current ratio: " + curRatio);
+                    "stable now, enable health check. current ratio: {}", curRatio);
                 Switch.setHeathCheckEnabled(true);
@@ -174,7 +172,8 @@ public class DistroMapper {
             // for every change disable healthy check for some while
             if (Switch.isHealthCheckEnabled()) {
                 Loggers.SRV_LOG.info("[VIPSRV-DISTRO] healthy server list changed, " +
-                    "disable health check for " + STABLE_PERIOD + "ms from now on, healthList: " + healthyList + ",newHealthyList " + newHealthyList);
+                    "disable health check for {} ms from now on, healthList: {}, newHealthyList {}",
+                    STABLE_PERIOD, healthyList, newHealthyList);
                 Switch.setHeathCheckEnabled(false);
                 AUTO_DISABLED_HEALTH_CHECK = true;
@@ -200,7 +199,7 @@ public class DistroMapper {
                 // site:ip:lastReportTime:weight
                 String[] params = config.split("#");
                 if (params.length <= 3) {
-                    Loggers.SRV_LOG.warn("received malformed distro map data: " + config);
+                    Loggers.SRV_LOG.warn("received malformed distro map data: {}", config);
                     continue;
                 }
@@ -232,8 +231,8 @@ public class DistroMapper {
                 if (serverId.equals(newServerId)) {
                     if (s.alive != server.alive || s.weight != server.weight) {
-                        Loggers.SRV_LOG.warn("server beat out of date, current: " + JSON.toJSONString(server)
-                            + ", last: " + JSON.toJSONString(s));
+                        Loggers.SRV_LOG.warn("server beat out of date, current: {}, last: {}",
+                            JSON.toJSONString(server), JSON.toJSONString(s));
                     }
                     tmpServerList.add(server);
                     continue;
@@ -281,10 +280,10 @@ public class DistroMapper {
             float curRatio = (float) newHealthyList.size() / allLocalSiteSrvs.size();
             if (AUTO_DISABLED_HEALTH_CHECK
                 && curRatio > Switch.getDistroThreshold()
                 && System.currentTimeMillis() - LAST_HEALTH_SERVER_MILLIS > STABLE_PERIOD) {
                 Loggers.SRV_LOG.info("[VIPSRV-DISTRO] distro threshold restored and " +
-                    "stable now, enable health check. current ratio: " + curRatio);
+                    "stable now, enable health check. current ratio: {}", curRatio);
                 Switch.setHeathCheckEnabled(true);
@@ -296,7 +295,7 @@ public class DistroMapper {
             // for every change disable healthy check for some while
             if (Switch.isHealthCheckEnabled()) {
                 Loggers.SRV_LOG.info("[VIPSRV-DISTRO] healthy server list changed, " +
-                    "disable health check for " + STABLE_PERIOD + "ms from now on");
+                    "disable health check for {} ms from now on", STABLE_PERIOD);
                 Switch.setHeathCheckEnabled(false);
                 AUTO_DISABLED_HEALTH_CHECK = true;
@@ -402,7 +401,7 @@ public class DistroMapper {
             try {
                 NamingProxy.reqAPI("distroStatus", params, serverIP, false);
             } catch (Exception e) {
-                Loggers.SRV_LOG.warn("DISTRO-STATUS-CLEAN", "Failed to request to clean server status to " + serverIP, e);
+                Loggers.SRV_LOG.warn("[DISTRO-STATUS-CLEAN] Failed to request to clean server status to " + serverIP, e);
             }
         }
@@ -488,7 +487,7 @@ public class DistroMapper {
             }
             if (!allServers.contains(localhostIP)) {
-                Loggers.SRV_LOG.error("NA", "local ip is not in serverlist, ip: " + localhostIP + ", serverlist: " + allServers);
+                Loggers.SRV_LOG.error("local ip is not in serverlist, ip: {}, serverlist: {}", localhostIP, allServers);
                 return;
             }
@@ -500,7 +499,7 @@ public class DistroMapper {
                     }
                 }
             } catch (Exception e) {
-                Loggers.SRV_LOG.error("SERVER-STATUS", "Exception while sending server status: ", e);
+                Loggers.SRV_LOG.error("[SERVER-STATUS] Exception while sending server status", e);
             } finally {
                 UtilsAndCommons.SERVER_STATUS_EXECUTOR.schedule(this, Switch.getServerStatusSynchronizationPeriodMillis(), TimeUnit.MILLISECONDS);
             }

View File

@@ -93,18 +93,18 @@ public class DomainsManager {
                 try {
                     TimeUnit.SECONDS.sleep(5);
                 } catch (InterruptedException e) {
-                    Loggers.SRV_LOG.error("AUTO-INIT", "failed to auto init", e);
+                    Loggers.SRV_LOG.error("[AUTO-INIT] failed to auto init", e);
                 }
                 try {
                     leader = RaftCore.getPeerSet().getLeader();
                     if (leader != null) {
-                        Loggers.SRV_LOG.info("[AUTO-INIT] leader is: " + leader.ip);
+                        Loggers.SRV_LOG.info("[AUTO-INIT] leader is: {}", leader.ip);
                         break;
                     }
                 } catch (Throwable throwable) {
-                    Loggers.SRV_LOG.error("AUTO-INIT", "failed to auto init", throwable);
+                    Loggers.SRV_LOG.error("[AUTO-INIT] failed to auto init", throwable);
                 }
             }
@@ -118,7 +118,7 @@ public class DomainsManager {
         } catch (Exception e) {
             toBeUpdatedDomsQueue.poll();
             toBeUpdatedDomsQueue.add(new DomainKey(namespaceId, domName, serverIP, checksum));
-            Loggers.SRV_LOG.error("DOMAIN-STATUS", "Failed to add domain to be updatd to queue.", e);
+            Loggers.SRV_LOG.error("[DOMAIN-STATUS] Failed to add domain to be updatd to queue.", e);
         } finally {
             lock.unlock();
         }
@@ -139,7 +139,7 @@ public class DomainsManager {
                 try {
                     domainKey = toBeUpdatedDomsQueue.take();
                 } catch (Exception e) {
-                    Loggers.EVT_LOG.error("UPDATE-DOMAIN", "Exception while taking item from LinkedBlockingDeque.");
+                    Loggers.EVT_LOG.error("[UPDATE-DOMAIN] Exception while taking item from LinkedBlockingDeque.");
                 }
                 if (domainKey == null) {
@@ -152,7 +152,7 @@ public class DomainsManager {
                         domainUpdateExecutor.execute(new DomUpdater(domainKey.getNamespaceId(), domName, serverIP));
                     }
                 } catch (Exception e) {
-                    Loggers.EVT_LOG.error("UPDATE-DOMAIN", "Exception while update dom: " + domName + "from " + serverIP, e);
+                    Loggers.EVT_LOG.error("[UPDATE-DOMAIN] Exception while update dom: {} from {}, error: {}", domName, serverIP, e);
                 }
             }
         }
@@ -174,7 +174,8 @@ public class DomainsManager {
             try {
                 updatedDom2(namespaceId, domName, serverIP);
             } catch (Exception e) {
-                Loggers.SRV_LOG.warn("DOMAIN-UPDATER", "Exception while update dom: " + domName + "from " + serverIP, e);
+                Loggers.SRV_LOG.warn("[DOMAIN-UPDATER] Exception while update dom: {} from {}, error: {}",
+                    domName, serverIP, e);
             }
         }
     }
@@ -204,9 +205,9 @@ public class DomainsManager {
                 Boolean valid = Boolean.parseBoolean(ipsMap.get(ipAddress.toIPAddr()));
                 if (valid != ipAddress.isValid()) {
                     ipAddress.setValid(valid);
-                    Loggers.EVT_LOG.info("{" + domName + "} {SYNC} " +
-                        "{IP-" + (ipAddress.isValid() ? "ENABLED" : "DISABLED") + "} " + ipAddress.getIp()
-                        + ":" + ipAddress.getPort() + "@" + ipAddress.getClusterName());
+                    Loggers.EVT_LOG.info("{} {SYNC} IP-{} : {}@{}",
+                        domName, (ipAddress.isValid() ? "ENABLED" : "DISABLED"),
+                        ipAddress.getIp(), ipAddress.getPort(), ipAddress.getClusterName());
                 }
             }
@@ -217,7 +218,7 @@ public class DomainsManager {
                 stringBuilder.append(ipAddress.toIPAddr()).append("_").append(ipAddress.isValid()).append(",");
             }
-            Loggers.EVT_LOG.info("[IP-UPDATED] dom: " + raftVirtualClusterDomain.getName() + ", ips: " + stringBuilder.toString());
+            Loggers.EVT_LOG.info("[IP-UPDATED] dom: {}, ips: {}", raftVirtualClusterDomain.getName(), stringBuilder.toString());
         }
@@ -257,7 +258,7 @@ public class DomainsManager {
         for (String namespaceId : serviceMap.keySet()) {
             for (Map.Entry<String, Domain> entry : serviceMap.get(namespaceId).entrySet()) {
                 if (DistroMapper.responsible(entry.getKey())) {
-                    domCount ++;
+                    domCount++;
                 }
             }
         }
@@ -329,8 +330,8 @@ public class DomainsManager {
             Cluster cluster = new Cluster(ipAddress.getClusterName());
             cluster.setDom(dom);
             dom.getClusterMap().put(ipAddress.getClusterName(), cluster);
-            Loggers.SRV_LOG.warn("cluster: " + ipAddress.getClusterName() + " not found, ip: " + ipAddress.toJSON()
-                + ", will create new cluster with default configuration.");
+            Loggers.SRV_LOG.warn("cluster: {} not found, ip: {}, will create new cluster with default configuration.",
+                ipAddress.getClusterName(), ipAddress.toJSON());
         }
         ipAddressMap.put(ipAddress.getDatumKey(), ipAddress);
@@ -374,7 +375,7 @@ public class DomainsManager {
                 }
             }
         } catch (Throwable throwable) {
-            Loggers.RAFT.error("NA", "error while processing json: " + oldJson, throwable);
+            Loggers.RAFT.error("error while processing json: " + oldJson, throwable);
         } finally {
             if (ipAddresses == null) {
                 ipAddresses = new ArrayList<>();
@@ -517,7 +518,8 @@ public class DomainsManager {
         public void addItem(String domName, String checksum) {
             if (StringUtils.isEmpty(domName) || StringUtils.isEmpty(checksum)) {
-                Loggers.SRV_LOG.warn("DOMAIN-CHECKSUM", "domName or checksum is empty,domName: " + domName + " checksum: " + checksum);
+                Loggers.SRV_LOG.warn("[DOMAIN-CHECKSUM] domName or checksum is empty,domName: {}, checksum: {}",
+                    domName, checksum);
                 return;
             }
@@ -531,7 +533,6 @@ public class DomainsManager {
         public void run() {
             try {
                 Map<String, Set<String>> allDomainNames = getAllDomNames();
                 if (allDomainNames.size() <= 0) {
@@ -577,7 +578,7 @@ public class DomainsManager {
                     }
                 }
             } catch (Exception e) {
-                Loggers.SRV_LOG.error("DOMAIN-STATUS", "Exception while sending domain status: ", e);
+                Loggers.SRV_LOG.error("[DOMAIN-STATUS] Exception while sending domain status", e);
             } finally {
                 UtilsAndCommons.DOMAIN_SYNCHRONIZATION_EXECUTOR.schedule(this, Switch.getDomStatusSynchronizationPeriodMillis(), TimeUnit.MILLISECONDS);
             }
@@ -623,7 +624,7 @@ public class DomainsManager {
         public void onChange(String key, String value) throws Exception {
             try {
                 if (StringUtils.isEmpty(value)) {
-                    Loggers.SRV_LOG.warn("received empty push from raft, key=" + key);
+                    Loggers.SRV_LOG.warn("received empty push from raft, key: {}", key);
                     return;
                 }
@@ -636,7 +637,7 @@ public class DomainsManager {
                     dom.setNamespaceId(UtilsAndCommons.getDefaultNamespaceId());
                 }
-                Loggers.RAFT.info("[RAFT-NOTIFIER] datum is changed, key:" + key + ", value:" + value);
+                Loggers.RAFT.info("[RAFT-NOTIFIER] datum is changed, key: {}, value: {}", key, value);
                 Domain oldDom = getDomain(dom.getNamespaceId(), dom.getName());
@@ -648,13 +649,13 @@ public class DomainsManager {
                     putDomain(dom);
                     dom.init();
-                    Loggers.SRV_LOG.info("[NEW-DOM-raft] " + dom.toJSON());
+                    Loggers.SRV_LOG.info("[NEW-DOM-RAFT] {}", dom.toJSON());
                 }
                 wakeUp(UtilsAndCommons.assembleFullServiceName(dom.getNamespaceId(), dom.getName()));
             } catch (Throwable e) {
-                Loggers.SRV_LOG.error("VIPSRV-DOM", "error while processing dom update", e);
+                Loggers.SRV_LOG.error("[NACOS-DOM] error while processing dom update", e);
             }
         }
@@ -664,11 +665,11 @@ public class DomainsManager {
             String namespace = domKey.split(UtilsAndCommons.SERVICE_GROUP_CONNECTOR)[0];
             String name = domKey.split(UtilsAndCommons.SERVICE_GROUP_CONNECTOR)[1];
             Domain dom = chooseDomMap(namespace).remove(name);
-            Loggers.RAFT.info("[RAFT-NOTIFIER] datum is deleted, key:" + key + ", value:" + value);
+            Loggers.RAFT.info("[RAFT-NOTIFIER] datum is deleted, key: {}, value: {}", key, value);
             if (dom != null) {
                 dom.destroy();
-                Loggers.SRV_LOG.info("[DEAD-DOM] " + dom.toJSON());
+                Loggers.SRV_LOG.info("[DEAD-DOM] {}", dom.toJSON());
             }
         }
     };

View File

@@ -301,7 +301,7 @@ public class IpAddress extends Instance implements Comparable {
     @Override
     public int compareTo(Object o) {
         if (!(o instanceof IpAddress)) {
-            Loggers.SRV_LOG.error("IPADDRESS-COMPARE", "Object is not an instance of IPAdress,object: " + o.getClass());
+            Loggers.SRV_LOG.error("[IPADDRESS-COMPARE] Object is not an instance of IPAdress, object: {}", o.getClass());
             throw new IllegalArgumentException("Object is not an instance of IPAdress,object: " + o.getClass());
         }

View File

@@ -68,7 +68,7 @@ public class VirtualClusterDomain implements Domain, RaftListener {
     private String namespaceId;
     /**
-     * IP will be deleted if it has not send beat for some time, default timeout is half an hour .
+     * IP will be deleted if it has not send beat for some time, default timeout is 30 seconds.
      */
     private long ipDeleteTimeout = 30 * 1000;
@@ -172,11 +172,11 @@ public class VirtualClusterDomain implements Domain, RaftListener {
     public void onChange(String key, String value) throws Exception {
         if (StringUtils.isEmpty(value)) {
-            Loggers.SRV_LOG.warn("[VIPSRV-DOM] received empty iplist config for dom: " + name);
+            Loggers.SRV_LOG.warn("[NACOS-DOM] received empty iplist config for dom: {}", name);
             return;
         }
-        Loggers.RAFT.info("[VIPSRV-RAFT] datum is changed, key: " + key + ", value: " + value);
+        Loggers.RAFT.info("[NACOS-RAFT] datum is changed, key: {}, value: {}", key, value);
         List<IpAddress> ips = JSON.parseObject(value, new TypeReference<List<IpAddress>>() {
         });
@@ -214,7 +214,7 @@ public class VirtualClusterDomain implements Domain, RaftListener {
         for (IpAddress ip : ips) {
             try {
                 if (ip == null) {
-                    Loggers.SRV_LOG.error("VIPSRV-DOM", "received malformed ip");
+                    Loggers.SRV_LOG.error("[NACOS-DOM] received malformed ip: null");
                     continue;
                 }
@@ -224,7 +224,7 @@ public class VirtualClusterDomain implements Domain, RaftListener {
                 // put wild ip into DEFAULT cluster
                 if (!clusterMap.containsKey(ip.getClusterName())) {
-                    Loggers.SRV_LOG.warn("cluster of IP not found: " + ip.toJSON());
+                    Loggers.SRV_LOG.warn("cluster of IP not found: {}", ip.toJSON());
                     continue;
                 }
@@ -236,7 +236,7 @@ public class VirtualClusterDomain implements Domain, RaftListener {
                 clusterIPs.add(ip);
             } catch (Exception e) {
-                Loggers.SRV_LOG.error("VIPSRV-DOM", "failed to process ip: " + ip, e);
+                Loggers.SRV_LOG.error("[NACOS-DOM] failed to process ip: " + ip, e);
             }
         }
@@ -253,7 +253,7 @@ public class VirtualClusterDomain implements Domain, RaftListener {
             stringBuilder.append(ipAddress.toIPAddr()).append("_").append(ipAddress.isValid()).append(",");
         }
-        Loggers.EVT_LOG.info("[IP-UPDATED] dom: " + getName() + ", ips: " + stringBuilder.toString());
+        Loggers.EVT_LOG.info("[IP-UPDATED] dom: {}, ips: {}", getName(), stringBuilder.toString());
     }
@@ -346,7 +346,7 @@ public class VirtualClusterDomain implements Domain, RaftListener {
             return vDom;
         } catch (Exception e) {
-            Loggers.SRV_LOG.error("VIPSRV-DOM", "parse cluster json error, " + e.toString() + ", content=" + json, e);
+            Loggers.SRV_LOG.error("[NACOS-DOM] parse cluster json content: {}, error: {}", json, e);
             return null;
         }
     }
@@ -470,42 +470,42 @@ public class VirtualClusterDomain implements Domain, RaftListener {
         VirtualClusterDomain vDom = (VirtualClusterDomain) dom;
         if (!StringUtils.equals(token, vDom.getToken())) {
-            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ",token" + token + " -> " + vDom.getToken());
+            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, token: {} -> {}", name, token, vDom.getToken());
             token = vDom.getToken();
         }
         if (!ListUtils.isEqualList(owners, vDom.getOwners())) {
-            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ",owners: " + owners + " -> " + vDom.getToken());
+            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, owners: {} -> {}", name, owners, vDom.getOwners());
             owners = vDom.getOwners();
         }
         if (protectThreshold != vDom.getProtectThreshold()) {
-            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ",protectThreshold: " + protectThreshold + " -> " + vDom.getProtectThreshold());
+            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, protectThreshold: {} -> {}", name, protectThreshold, vDom.getProtectThreshold());
             protectThreshold = vDom.getProtectThreshold();
         }
         if (useSpecifiedURL != vDom.isUseSpecifiedURL()) {
-            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ",useSpecifiedURL: " + useSpecifiedURL + " -> " + vDom.isUseSpecifiedURL());
+            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, useSpecifiedURL: {} -> {}", name, useSpecifiedURL, vDom.isUseSpecifiedURL());
            useSpecifiedURL = vDom.isUseSpecifiedURL();
         }
         if (resetWeight != vDom.getResetWeight().booleanValue()) {
-            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ",resetWeight: " + resetWeight + " -> " + vDom.getResetWeight());
+            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, resetWeight: {} -> {}", name, resetWeight, vDom.getResetWeight());
             resetWeight = vDom.getResetWeight();
         }
         if (enableHealthCheck != vDom.getEnableHealthCheck().booleanValue()) {
-            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ", enableHealthCheck: " + enableHealthCheck + " -> " + vDom.getEnableHealthCheck());
+            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, enableHealthCheck: {} -> {}", name, enableHealthCheck, vDom.getEnableHealthCheck());
             enableHealthCheck = vDom.getEnableHealthCheck();
         }
         if (enableClientBeat != vDom.getEnableClientBeat().booleanValue()) {
-            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ", enableClientBeat: " + enableClientBeat + " -> " + vDom.getEnableClientBeat());
+            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, enableClientBeat: {} -> {}", name, enableClientBeat, vDom.getEnableClientBeat());
             enableClientBeat = vDom.getEnableClientBeat();
         }
         if (enabled != vDom.getEnabled().booleanValue()) {
-            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: " + name + ", enabled: " + enabled + " -> " + vDom.getEnabled());
+            Loggers.SRV_LOG.info("[DOM-UPDATE] dom: {}, enabled: {} -> {}", name, enabled, vDom.getEnabled());
             enabled = vDom.getEnabled();
         }
@@ -552,13 +552,13 @@ public class VirtualClusterDomain implements Domain, RaftListener {
             MessageDigest md5 = MessageDigest.getInstance("MD5");
             result = new BigInteger(1, md5.digest((ipsString.toString()).getBytes(Charset.forName("UTF-8")))).toString(16);
         } catch (Exception e) {
-            Loggers.SRV_LOG.error("VIPSRV-DOM", "error while calculating checksum(md5)", e);
+            Loggers.SRV_LOG.error("[NACOS-DOM] error while calculating checksum(md5)", e);
             result = RandomStringUtils.randomAscii(32);
         }
         checksum = result;
     } catch (Exception e) {
-        Loggers.SRV_LOG.error("VIPSRV-DOM", "error while calculating checksum(md5)", e);
+        Loggers.SRV_LOG.error("[NACOS-DOM] error while calculating checksum(md5)", e);
         checksum = RandomStringUtils.randomAscii(32);
     }
 }

View File

@@ -76,7 +76,7 @@ public abstract class AbstractHealthCheckProcessor {
         }
         if (!healthCheckResults.offer(result)) {
-            Loggers.EVT_LOG.warn("HEALTH-CHECK-SYNC", "failed to add check result to queue, queue size: " + healthCheckResults.size());
+            Loggers.EVT_LOG.warn("[HEALTH-CHECK-SYNC] failed to add check result to queue, queue size: {}", healthCheckResults.size());
         }
     }
@@ -110,17 +110,20 @@ public abstract class AbstractHealthCheckProcessor {
                 }
                 Map<String, String> params = new HashMap<>(10);
                 params.put("result", JSON.toJSONString(list));
-                Loggers.DEBUG_LOG.debug("[HEALTH-SYNC]" + server + ", " + JSON.toJSONString(list));
+                if (Loggers.DEBUG_LOG.isDebugEnabled()) {
+                    Loggers.DEBUG_LOG.debug("[HEALTH-SYNC] server: {}, healthCheckResults: {}",
+                        server, JSON.toJSONString(list));
+                }
                 if (!server.contains(":")) {
                     server = server + ":" + RunningConfig.getServerPort();
                 }
                 HttpClient.HttpResult httpResult = HttpClient.httpPost("http://" + server
                     + RunningConfig.getContextPath() + UtilsAndCommons.NACOS_NAMING_CONTEXT
                     + "/api/healthCheckResult", null, params);
                 if (httpResult.code != HttpURLConnection.HTTP_OK) {
-                    Loggers.EVT_LOG.warn("HEALTH-CHECK-SYNC", "failed to send result to " + server
-                        + ", result: " + JSON.toJSONString(list));
+                    Loggers.EVT_LOG.warn("[HEALTH-CHECK-SYNC] failed to send result to {}, result: {}",
+                        server, JSON.toJSONString(list));
                 }
             }
@@ -215,25 +218,22 @@ public abstract class AbstractHealthCheckProcessor {
                         PushService.domChanged(vDom.getNamespaceId(), vDom.getName());
                         addResult(new HealthCheckResult(vDom.getName(), ip));
-                        Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {POS} {IP-ENABLED} valid: "
-                            + ip.getIp() + ":" + ip.getPort() + "@" + cluster.getName()
-                            + ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + msg);
+                        Loggers.EVT_LOG.info("dom: {} {POS} {IP-ENABLED} valid: {}:{}@{}, region: {}, msg: {}",
+                            cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), DistroMapper.LOCALHOST_SITE, msg);
                     } else {
                         if (!ip.isMockValid()) {
                             ip.setMockValid(true);
-                            Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {PROBE} {IP-ENABLED} valid: "
-                                + ip.getIp() + ":" + ip.getPort() + "@" + cluster.getName()
-                                + ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + msg);
+                            Loggers.EVT_LOG.info("dom: {} {PROBE} {IP-ENABLED} valid: {}:{}@{}, region: {}, msg: {}",
+                                cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), DistroMapper.LOCALHOST_SITE, msg);
                         }
                     }
                 } else {
-                    Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {OTHER} " +
-                        "{IP-ENABLED} pre-valid: " + ip.getIp() + ":" + ip.getPort() + "@"
-                        + cluster.getName() + " in " + ip.getOKCount() + ", msg: " + msg);
+                    Loggers.EVT_LOG.info("dom: {} {OTHER} {IP-ENABLED} pre-valid: {}:{}@{} in {}, msg: {}",
+                        cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), ip.getOKCount(), msg);
                 }
             }
         } catch (Throwable t) {
-            Loggers.SRV_LOG.error("CHECK-OK", "error when close check task.", t);
+            Loggers.SRV_LOG.error("[CHECK-OK] error when close check task.", t);
         }
         ip.getFailCount().set(0);
@@ -256,23 +256,20 @@ public abstract class AbstractHealthCheckProcessor {
                         PushService.domChanged(vDom.getNamespaceId(), vDom.getName());
-                        Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {POS} {IP-DISABLED} invalid: "
-                            + ip.getIp() + ":" + ip.getPort() + "@" + cluster.getName()
-                            + ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + msg);
+                        Loggers.EVT_LOG.info("dom: {} {POS} {IP-DISABLED} invalid: {}:{}@{}, region: {}, msg: {}",
+                            cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), DistroMapper.LOCALHOST_SITE, msg);
                     } else {
-                        Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {PROBE} {IP-DISABLED} invalid: "
-                            + ip.getIp() + ":" + ip.getPort() + "@" + cluster.getName()
-                            + ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + msg);
+                        Loggers.EVT_LOG.info("dom: {} {PROBE} {IP-DISABLED} invalid: {}:{}@{}, region: {}, msg: {}",
+                            cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), DistroMapper.LOCALHOST_SITE, msg);
                     }
                 } else {
-                    Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {OTHER} " +
-                        "{IP-DISABLED} pre-invalid: " + ip.getIp() + ":" + ip.getPort()
-                        + "@" + cluster.getName() + " in " + ip.getFailCount() + ", msg: " + msg);
+                    Loggers.EVT_LOG.info("dom: {} {OTHER} {IP-DISABLED} pre-invalid: {}:{}@{} in {}, msg: {}",
+                        cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), ip.getFailCount(), msg);
                 }
             }
         } catch (Throwable t) {
-            Loggers.SRV_LOG.error("CHECK-FAIL", "error when close check task.", t);
+            Loggers.SRV_LOG.error("[CHECK-FAIL] error when close check task.", t);
         }
         ip.getOKCount().set(0);
@@ -294,21 +291,19 @@ public abstract class AbstractHealthCheckProcessor {
                         PushService.domChanged(vDom.getNamespaceId(), vDom.getName());
                         addResult(new HealthCheckResult(vDom.getName(), ip));
-                        Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {POS} {IP-DISABLED} invalid-now: "
-                            + ip.getIp() + ":" + ip.getPort() + "@" + cluster.getName()
-                            + ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + msg);
+                        Loggers.EVT_LOG.info("dom: {} {POS} {IP-DISABLED} invalid-now: {}:{}@{}, region: {}, msg: {}",
+                            cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), DistroMapper.LOCALHOST_SITE, msg);
                     } else {
                         if (ip.isMockValid()) {
                             ip.setMockValid(false);
-                            Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {PROBE} {IP-DISABLED} invalid-now: "
-                                + ip.getIp() + ":" + ip.getPort() + "@" + cluster.getName()
-                                + ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + msg);
+                            Loggers.EVT_LOG.info("dom: {} {PROBE} {IP-DISABLED} invalid-now: {}:{}@{}, region: {}, msg: {}",
+                                cluster.getDom().getName(), ip.getIp(), ip.getPort(), cluster.getName(), DistroMapper.LOCALHOST_SITE, msg);
                         }
                     }
                 }
             } catch (Throwable t) {
-                Loggers.SRV_LOG.error("CHECK-FAIL-NOW", "error when close check task.", t);
+                Loggers.SRV_LOG.error("[CHECK-FAIL-NOW] error when close check task.", t);
             }
             ip.getOKCount().set(0);

View File

@@ -56,10 +56,9 @@ public class ClientBeatCheckTask implements Runnable {
                 if (!ipAddress.isMarked()) {
                     if (ipAddress.isValid()) {
                         ipAddress.setValid(false);
-                        Loggers.EVT_LOG.info("{" + ipAddress.getClusterName() + "} {POS} {IP-DISABLED} valid: "
-                            + ipAddress.getIp() + ":" + ipAddress.getPort() + "@" + ipAddress.getClusterName()
-                            + ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + "client timeout after "
-                            + ClientBeatProcessor.CLIENT_BEAT_TIMEOUT + ", last beat: " + ipAddress.getLastBeat());
+                        Loggers.EVT_LOG.info("{POS} {IP-DISABLED} valid: {}:{}@{}, region: {}, msg: client timeout after {}, last beat: {}",
+                            ipAddress.getIp(), ipAddress.getPort(), ipAddress.getClusterName(),
+                            DistroMapper.LOCALHOST_SITE, ClientBeatProcessor.CLIENT_BEAT_TIMEOUT, ipAddress.getLastBeat());
                         PushService.domChanged(domain.getNamespaceId(), domain.getName());
                     }
                 }
@@ -67,16 +66,12 @@ public class ClientBeatCheckTask implements Runnable {
                 if (System.currentTimeMillis() - ipAddress.getLastBeat() > domain.getIpDeleteTimeout()) {
                     // delete ip
-                    // if (domain.allIPs().size() > 1) {
-                    Loggers.SRV_LOG.info("[AUTO-DELETE-IP] dom: " + domain.getName() + ", ip: " + JSON.toJSONString(ipAddress));
+                    Loggers.SRV_LOG.info("[AUTO-DELETE-IP] dom: {}, ip: {}", domain.getName(), JSON.toJSONString(ipAddress));
                     deleteIP(ipAddress);
-                    // }
                 }
             }
         } catch (Exception e) {
             Loggers.SRV_LOG.warn("Exception while processing client beat time out.", e);
-        } finally {
-            // HealthCheckReactor.scheduleCheck(this);
         }
     }
@@ -84,17 +79,17 @@ public class ClientBeatCheckTask implements Runnable {
     private void deleteIP(IpAddress ipAddress) {
         try {
             String ipList = ipAddress.getIp() + ":" + ipAddress.getPort() + "_"
                 + ipAddress.getWeight() + "_" + ipAddress.getClusterName();
             String url = "http://127.0.0.1:" + RunningConfig.getServerPort() + RunningConfig.getContextPath()
                 + UtilsAndCommons.NACOS_NAMING_CONTEXT + "/api/remvIP4Dom?dom="
                 + domain.getName() + "&ipList=" + ipList + "&token=" + domain.getToken();
             HttpClient.HttpResult result = HttpClient.httpGet(url, null, null);
             if (result.code != HttpURLConnection.HTTP_OK) {
-                Loggers.SRV_LOG.error("IP-DEAD", "failed to delete ip automatically, ip: "
-                    + ipAddress.toJSON() + ", caused " + result.content + ",resp code: " + result.code);
+                Loggers.SRV_LOG.error("[IP-DEAD] failed to delete ip automatically, ip: {}, caused {}, resp code: {}",
+                    ipAddress.toJSON(), result.content, result.code);
             }
         } catch (Exception e) {
-            Loggers.SRV_LOG.error("IP-DEAD", "failed to delete ip automatically, ip: " + ipAddress.toJSON(), e);
+            Loggers.SRV_LOG.error("[IP-DEAD] failed to delete ip automatically, ip: {}, error: {}", ipAddress.toJSON(), e);
         }
     }

View File

@@ -61,7 +61,7 @@ public class ClientBeatProcessor implements Runnable {
             return;
         }
-        Loggers.EVT_LOG.debug("[CLIENT-BEAT] processing beat: " + rsInfo.toString());
+        Loggers.EVT_LOG.debug("[CLIENT-BEAT] processing beat: {}", rsInfo.toString());
         String ip = rsInfo.getIp();
         String clusterName = rsInfo.getCluster();
@@ -69,19 +69,15 @@ public class ClientBeatProcessor implements Runnable {
         Cluster cluster = virtualClusterDomain.getClusterMap().get(clusterName);
         List<IpAddress> ipAddresses = cluster.allIPs();
-        boolean processed = false;
         for (IpAddress ipAddress: ipAddresses) {
             if (ipAddress.getIp().equals(ip) && ipAddress.getPort() == port) {
-                processed = true;
-                Loggers.EVT_LOG.debug("[CLIENT-BEAT] refresh beat: " + rsInfo.toString());
+                Loggers.EVT_LOG.debug("[CLIENT-BEAT] refresh beat: {}", rsInfo.toString());
                 ipAddress.setLastBeat(System.currentTimeMillis());
                 if (!ipAddress.isMarked()) {
                     if (!ipAddress.isValid()) {
                         ipAddress.setValid(true);
-                        Loggers.EVT_LOG.info("{" + cluster.getDom().getName() + "} {POS} {IP-ENABLED} valid: "
-                            + ip+ ":" + port+ "@" + cluster.getName()
-                            + ", region: " + DistroMapper.LOCALHOST_SITE + ", msg: " + "client beat ok");
+                        Loggers.EVT_LOG.info("dom: {} {POS} {IP-ENABLED} valid: {}:{}@{}, region: {}, msg: client beat ok",
+                            cluster.getDom().getName(), ip, port, cluster.getName(), DistroMapper.LOCALHOST_SITE);
                         PushService.domChanged(virtualClusterDomain.getNamespaceId(), domain.getName());
                     }
                 }

View File

@@ -62,7 +62,7 @@ public class HealthCheckReactor {
         try {
             scheduledFuture.cancel(true);
         } catch (Exception e) {
-            Loggers.EVT_LOG.error("CANCEL-CHECK", "cancel failed!", e);
+            Loggers.EVT_LOG.error("[CANCEL-CHECK] cancel failed!", e);
         }
     }

View File

@@ -64,7 +64,7 @@ public class HealthCheckStatus {
                 + clusterName + ":"
                 + datumKey;
         } catch (Throwable e) {
-            Loggers.SRV_LOG.error("BUILD-KEY", "Exception while set rt, ip " + ip.toJSON(), e);
+            Loggers.SRV_LOG.error("[BUILD-KEY] Exception while set rt, ip {}, error: {}", ip.toJSON(), e);
         }
         return ip.getDefaultKey();

View File

@@ -31,7 +31,7 @@ public class HealthCheckTask implements Runnable {
     private long checkRTNormalized = -1;
     private long checkRTBest = -1;
-    private long checkRTWorst= -1;
+    private long checkRTWorst = -1;
     private long checkRTLast = -1;
     private long checkRTLastLast = -1;
@@ -55,38 +55,36 @@ public class HealthCheckTask implements Runnable {
     @Override
     public void run() {
         AbstractHealthCheckProcessor processor = AbstractHealthCheckProcessor.getProcessor(cluster.getHealthChecker());
         try {
             if (DistroMapper.responsible(cluster.getDom().getName())) {
                 processor.process(this);
-                Loggers.EVT_LOG.debug("[HEALTH-CHECK] schedule health check task: " + cluster.getDom().getName());
+                Loggers.EVT_LOG.debug("[HEALTH-CHECK] schedule health check task: {}", cluster.getDom().getName());
             }
         } catch (Throwable e) {
-            Loggers.SRV_LOG.error("VIPSRV-HEALTH-CHECK", "error while process health check for " + cluster.getDom().getName() + ":" + cluster.getName(), e);
+            Loggers.SRV_LOG.error("[HEALTH-CHECK] error while process health check for {}:{}, error: {}",
+                cluster.getDom().getName(), cluster.getName(), e);
         } finally {
             if (!cancelled) {
                 HealthCheckReactor.scheduleCheck(this);
                 // worst == 0 means never checked
                 if (this.getCheckRTWorst() > 0
                     && Switch.isHealthCheckEnabled(cluster.getDom().getName())
                     && DistroMapper.responsible(cluster.getDom().getName())) {
                     // TLog doesn't support float so we must convert it into long
                     long diff = ((this.getCheckRTLast() - this.getCheckRTLastLast()) * 10000)
                         / this.getCheckRTLastLast();
                     this.setCheckRTLastLast(this.getCheckRTLast());
                     Cluster cluster = this.getCluster();
-                    if (((VirtualClusterDomain)cluster.getDom()).getEnableHealthCheck()) {
-                        Loggers.CHECK_RT.info(cluster.getDom().getName() + ":" + cluster.getName()
-                            + "@" + processor.getType()
-                            + "->normalized: " + this.getCheckRTNormalized()
-                            + ", worst: " + this.getCheckRTWorst()
-                            + ", best: " + this.getCheckRTBest()
-                            + ", last: " + this.getCheckRTLast()
-                            + ", diff: " + diff);
+                    if (((VirtualClusterDomain) cluster.getDom()).getEnableHealthCheck()) {
+                        Loggers.CHECK_RT.info("{}:{}@{}->normalized: {}, worst: {}, best: {}, last: {}, diff: {}",
+                            cluster.getDom().getName(), cluster.getName(), processor.getType(),
+                            this.getCheckRTNormalized(), this.getCheckRTWorst(), this.getCheckRTBest(),
+                            this.getCheckRTLast(), diff);
                     }
                 }
             }

View File

@@ -64,7 +64,7 @@ public class HttpHealthCheckProcessor extends AbstractHealthCheckProcessor {
             builder.setUserAgent("VIPServer");
             asyncHttpClient = new AsyncHttpClient(builder.build());
         } catch (Throwable e) {
-            SRV_LOG.error("VIPSRV-HEALTH-CHECK", "Error while constructing HTTP asynchronous client, " + e.toString(), e);
+            SRV_LOG.error("[HEALTH-CHECK] Error while constructing HTTP asynchronous client", e);
         }
     }
@@ -93,16 +93,14 @@ public class HttpHealthCheckProcessor extends AbstractHealthCheckProcessor {
             if (ip.isMarked()) {
                 if (SRV_LOG.isDebugEnabled()) {
-                    SRV_LOG.debug("http check, ip is marked as to skip health check, ip:" + ip.getIp());
+                    SRV_LOG.debug("http check, ip is marked as to skip health check, ip: {}" + ip.getIp());
                 }
                 continue;
             }
             if (!ip.markChecking()) {
-                SRV_LOG.warn("http check started before last one finished, dom: "
-                    + task.getCluster().getDom().getName() + ":"
-                    + task.getCluster().getName() + ":"
-                    + ip.getIp());
+                SRV_LOG.warn("http check started before last one finished, dom: {}:{}:{}",
+                    task.getCluster().getDom().getName(), task.getCluster().getName(), ip.getIp());
                 reEvaluateCheckRT(task.getCheckRTNormalized() * 2, task, Switch.getHttpHealthParams());
                 continue;

View File

@@ -95,16 +95,14 @@ public class MysqlHealthCheckProcessor extends AbstractHealthCheckProcessor {
             if (ip.isMarked()) {
                 if (SRV_LOG.isDebugEnabled()) {
-                    SRV_LOG.debug("mysql check, ip is marked as to skip health check, ip:" + ip.getIp());
+                    SRV_LOG.debug("mysql check, ip is marked as to skip health check, ip: {}", ip.getIp());
                 }
                 continue;
             }
             if (!ip.markChecking()) {
-                SRV_LOG.warn("mysql check started before last one finished, dom: "
-                    + task.getCluster().getDom().getName() + ":"
-                    + task.getCluster().getName() + ":"
-                    + ip.getIp());
+                SRV_LOG.warn("mysql check started before last one finished, dom: {}:{}:{}",
+                    task.getCluster().getDom().getName(), task.getCluster().getName(), ip.getIp());
                 reEvaluateCheckRT(task.getCheckRTNormalized() * 2, task, Switch.getMysqlHealthParams());
                 continue;
@@ -202,14 +200,14 @@ public class MysqlHealthCheckProcessor extends AbstractHealthCheckProcessor {
                 try {
                     statement.close();
                 } catch (SQLException e) {
-                    Loggers.SRV_LOG.error("MYSQL-CHECK", "failed to close statement:" + statement, e);
+                    Loggers.SRV_LOG.error("[MYSQL-CHECK] failed to close statement:" + statement, e);
                 }
             }
             if (resultSet != null) {
                 try {
                     resultSet.close();
                 } catch (SQLException e) {
-                    Loggers.SRV_LOG.error("MYSQL-CHECK", "failed to close resultSet:" + resultSet, e);
+                    Loggers.SRV_LOG.error("[MYSQL-CHECK] failed to close resultSet:" + resultSet, e);
                 }
             }
         }

View File

@@ -167,7 +167,7 @@ public class TcpSuperSenseProcessor extends AbstractHealthCheckProcessor impleme
                     NIO_EXECUTOR.execute(new PostProcessor(key));
                 }
             } catch (Throwable e) {
-                SRV_LOG.error("VIPSRV-HEALTH-CHECK", "error while processing NIO task", e);
+                SRV_LOG.error("[HEALTH-CHECK] error while processing NIO task", e);
             }
         }
     }

View File

@@ -53,7 +53,7 @@ public class DomainStatusSynchronizer implements Synchronizer {
                 @Override
                 public Integer onCompleted(Response response) throws Exception {
                     if (response.getStatusCode() != HttpURLConnection.HTTP_OK) {
-                        Loggers.SRV_LOG.warn("STATUS-SYNCHRONIZE", "failed to request domStatus, remote server: " + serverIP);
+                        Loggers.SRV_LOG.warn("[STATUS-SYNCHRONIZE] failed to request domStatus, remote server: {}", serverIP);
                         return 1;
                     }
@@ -61,7 +61,7 @@ public class DomainStatusSynchronizer implements Synchronizer {
                 }
             });
         } catch (Exception e) {
-            Loggers.SRV_LOG.warn("STATUS-SYNCHRONIZE", "failed to request domStatus, remote server: " + serverIP, e);
+            Loggers.SRV_LOG.warn("[STATUS-SYNCHRONIZE] failed to request domStatus, remote server: " + serverIP, e);
         }
     }
@@ -78,11 +78,10 @@ public class DomainStatusSynchronizer implements Synchronizer {
         String result;
         try {
-            Loggers.SRV_LOG.info("[STATUS-SYNCHRONIZE] sync dom status from: "
-                + serverIP + ", dom: " + key);
+            Loggers.SRV_LOG.info("[STATUS-SYNCHRONIZE] sync dom status from: {}, dom: {}", serverIP, key);
             result = NamingProxy.reqAPI("ip4Dom2", params, serverIP, false);
         } catch (Exception e) {
-            Loggers.SRV_LOG.warn("STATUS-SYNCHRONIZE", "Failed to get domain status from " + serverIP, e);
+            Loggers.SRV_LOG.warn("[STATUS-SYNCHRONIZE] Failed to get domain status from " + serverIP, e);
             return null;
         }

View File

@@ -112,7 +112,7 @@ public class HttpClient {
             return getResult(conn);
         } catch (Exception e) {
-            Loggers.SRV_LOG.warn("VIPSRV", "Exception while request: " + url + ", caused: " + e.getMessage());
+            Loggers.SRV_LOG.warn("[VIPSRV] Exception while request: {}, caused: {}", url, e);
             return new HttpResult(500, e.toString(), Collections.<String, String>emptyMap());
         } finally {
             if (conn != null) {

View File

@ -80,7 +80,8 @@ public class NamingProxy {
if (System.currentTimeMillis() - lastSrvSiteRefreshTime > VIP_SRV_SITE_REF_INTER_MILLIS || if (System.currentTimeMillis() - lastSrvSiteRefreshTime > VIP_SRV_SITE_REF_INTER_MILLIS ||
!CollectionUtils.isEqualCollection(servers, lastServers)) { !CollectionUtils.isEqualCollection(servers, lastServers)) {
if (!CollectionUtils.isEqualCollection(servers, lastServers)) { if (!CollectionUtils.isEqualCollection(servers, lastServers)) {
Loggers.SRV_LOG.info("[REFRESH-SERVER-SITE] server list is changed, old: " + lastServers + ", new: " + servers); Loggers.SRV_LOG.info("[REFRESH-SERVER-SITE] server list is changed, old: {}, new: {}",
lastServers, servers);
} }
lastServers = servers; lastServers = servers;
@ -144,15 +145,21 @@ public class NamingProxy {
Loggers.SRV_LOG.warn("failed to get config: " + CLUSTER_CONF_FILE_PATH, e); Loggers.SRV_LOG.warn("failed to get config: " + CLUSTER_CONF_FILE_PATH, e);
} }
Loggers.DEBUG_LOG.debug("REFRESH-SERVER-LIST1", result); if (Loggers.DEBUG_LOG.isDebugEnabled()) {
Loggers.DEBUG_LOG.debug("REFRESH-SERVER-LIST1 {}", result);
}
//use system env //use system env
if (CollectionUtils.isEmpty(result)) { if (CollectionUtils.isEmpty(result)) {
result = SystemUtils.getIPsBySystemEnv(UtilsAndCommons.SELF_SERVICE_CLUSTER_ENV); result = SystemUtils.getIPsBySystemEnv(UtilsAndCommons.SELF_SERVICE_CLUSTER_ENV);
Loggers.DEBUG_LOG.debug("REFRESH-SERVER-LIST4: " + result); if (Loggers.DEBUG_LOG.isDebugEnabled()) {
Loggers.DEBUG_LOG.debug("REFRESH-SERVER-LIST4: {}", result);
}
} }
Loggers.DEBUG_LOG.debug("REFRESH-SERVER-LIST2" + result); if (Loggers.DEBUG_LOG.isDebugEnabled()) {
Loggers.DEBUG_LOG.debug("REFRESH-SERVER-LIST2 {}", result);
}
if (!result.isEmpty() && !result.get(0).contains(UtilsAndCommons.CLUSTER_CONF_IP_SPLITER)) { if (!result.isEmpty() && !result.get(0).contains(UtilsAndCommons.CLUSTER_CONF_IP_SPLITER)) {
for (int i = 0; i < result.size(); i++) { for (int i = 0; i < result.size(); i++) {
@ -175,7 +182,9 @@ public class NamingProxy {
servers.put("sameSite", snapshot); servers.put("sameSite", snapshot);
servers.put("otherSite", new ArrayList<String>()); servers.put("otherSite", new ArrayList<String>());
Loggers.SRV_LOG.debug("sameSiteServers:" + servers.toString()); if (Loggers.SRV_LOG.isDebugEnabled()) {
Loggers.SRV_LOG.debug("sameSiteServers: {}", servers.toString());
}
return servers; return servers;
} }
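The isDebugEnabled() guards added above deserve a note: {} substitution already defers building the message string, so the guard is there to skip evaluating the arguments themselves. A minimal sketch, with the expensive argument hypothetical:

import java.util.Arrays;
import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DebugGuardDemo {

    private static final Logger DEBUG_LOG = LoggerFactory.getLogger("debugLog");

    public static void main(String[] args) {
        // Without the guard, expensiveSnapshot() runs even when debug is off;
        // parameterized logging defers only the formatting, not the arguments.
        if (DEBUG_LOG.isDebugEnabled()) {
            DEBUG_LOG.debug("REFRESH-SERVER-LIST1 {}", expensiveSnapshot());
        }
    }

    // stand-in for an argument that is costly to compute
    private static List<String> expensiveSnapshot() {
        return Arrays.asList("192.168.1.1:8848", "192.168.1.2:8848");
    }
}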

View File

@ -50,7 +50,8 @@ public class ServerStatusSynchronizer implements Synchronizer {
@Override @Override
public Integer onCompleted(Response response) throws Exception { public Integer onCompleted(Response response) throws Exception {
if (response.getStatusCode() != HttpURLConnection.HTTP_OK) { if (response.getStatusCode() != HttpURLConnection.HTTP_OK) {
Loggers.SRV_LOG.warn("STATUS-SYNCHRONIZE", "failed to request serverStatus, remote server: " + serverIP); Loggers.SRV_LOG.warn("[STATUS-SYNCHRONIZE] failed to request serverStatus, remote server: {}",
serverIP);
return 1; return 1;
} }
@ -58,7 +59,7 @@ public class ServerStatusSynchronizer implements Synchronizer {
} }
}); });
} catch (Exception e) { } catch (Exception e) {
Loggers.SRV_LOG.warn("STATUS-SYNCHRONIZE", "failed to request serverStatus, remote server: " + serverIP, e); Loggers.SRV_LOG.warn("[STATUS-SYNCHRONIZE] failed to request serverStatus, remote server: " + serverIP, e);
} }
} }

View File

@ -58,7 +58,7 @@ public class Switch {
@Override @Override
public void onChange(String key, String value) throws Exception { public void onChange(String key, String value) throws Exception {
Loggers.RAFT.info("[VIPSRV-RAFT] datum is changed, key: " + key + ", value: " + value); Loggers.RAFT.info("[NACOS-RAFT] datum is changed, key: {}, value: {}", key, value);
if (StringUtils.isEmpty(value)) { if (StringUtils.isEmpty(value)) {
return; return;
} }
@ -135,7 +135,7 @@ public class Switch {
try { try {
RaftCore.doSignalPublish(UtilsAndCommons.getDomStoreKey(dom), JSON.toJSONString(dom)); RaftCore.doSignalPublish(UtilsAndCommons.getDomStoreKey(dom), JSON.toJSONString(dom));
} catch (Exception e) { } catch (Exception e) {
Loggers.SRV_LOG.error("VIPSRV-SWITCH", "failed to save switch", e); Loggers.SRV_LOG.error("[SWITCH] failed to save switch", e);
} }
} }

View File

@ -54,7 +54,7 @@ public class PerformanceLoggerThread {
} }
private void freshHealthCheckSwitch() { private void freshHealthCheckSwitch() {
Loggers.SRV_LOG.info("[HEALTH-CHECK] health check is " + Switch.isHealthCheckEnabled()); Loggers.SRV_LOG.info("[HEALTH-CHECK] health check is {}", Switch.isHealthCheckEnabled());
} }
class HealthCheckSwitchTask implements Runnable { class HealthCheckSwitchTask implements Runnable {
@ -85,9 +85,9 @@ public class PerformanceLoggerThread {
int ipCount = domainsManager.getInstanceCount(); int ipCount = domainsManager.getInstanceCount();
long maxPushMaxCost = getMaxPushCost(); long maxPushMaxCost = getMaxPushCost();
long avgPushCost = getAvgPushCost(); long avgPushCost = getAvgPushCost();
Loggers.PERFORMANCE_LOG.info("PERFORMANCE:" + "|" + domCount + "|" + ipCount + "|" + maxPushMaxCost + "|" + avgPushCost); Loggers.PERFORMANCE_LOG.info("[PERFORMANCE] " + "|" + domCount + "|" + ipCount + "|" + maxPushMaxCost + "|" + avgPushCost);
} catch (Exception e) { } catch (Exception e) {
Loggers.SRV_LOG.warn("PERFORMANCE", "Exception while print performance log.", e); Loggers.SRV_LOG.warn("[PERFORMANCE] Exception while print performance log.", e);
} }
} }
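For the pipe-delimited performance line, a parameterized equivalent keeps the '|' separators intact so downstream scripts can still split the record on them; a sketch with made-up counts:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PerformanceLineDemo {

    private static final Logger PERFORMANCE_LOG = LoggerFactory.getLogger("performanceLog");

    public static void main(String[] args) {
        int domCount = 12;
        int ipCount = 340;
        long maxPushCost = 95L;
        long avgPushCost = 17L;

        // emits: [PERFORMANCE] |12|340|95|17 -- one field per metric
        PERFORMANCE_LOG.info("[PERFORMANCE] |{}|{}|{}|{}", domCount, ipCount, maxPushCost, avgPushCost);
    }
}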

View File

@ -105,13 +105,13 @@ public class PushService {
try { try {
removeClientIfZombie(); removeClientIfZombie();
} catch (Throwable e) { } catch (Throwable e) {
Loggers.PUSH.warn("VIPSRV-PUSH", "failed to remove client zombied"); Loggers.PUSH.warn("[NACOS-PUSH] failed to remove client zombie");
} }
} }
}, 0, 20, TimeUnit.SECONDS); }, 0, 20, TimeUnit.SECONDS);
} catch (SocketException e) { } catch (SocketException e) {
Loggers.SRV_LOG.error("VIPSRV-PUSH", "failed to init push service"); Loggers.SRV_LOG.error("[NACOS-PUSH] failed to init push service");
} }
} }
@ -155,9 +155,9 @@ public class PushService {
} else { } else {
PushClient res = clients.putIfAbsent(client.toString(), client); PushClient res = clients.putIfAbsent(client.toString(), client);
if (res != null) { if (res != null) {
Loggers.PUSH.warn("client:" + res.getAddrStr() + " already associated with key " + res.toString()); Loggers.PUSH.warn("client: {} already associated with key {}", res.getAddrStr(), res.toString());
} }
Loggers.PUSH.debug("client: " + client.getAddrStr() + " added for dom: " + client.getDom()); Loggers.PUSH.debug("client: {} added for dom: {}", client.getAddrStr(), client.getDom());
} }
} }
@ -176,7 +176,7 @@ public class PushService {
size += clientConcurrentMap.size(); size += clientConcurrentMap.size();
} }
Loggers.PUSH.info("VIPSRV-PUSH", "clientMap size: " + size); Loggers.PUSH.info("[NACOS-PUSH] clientMap size: {}", size);
} }
@ -197,8 +197,8 @@ public class PushService {
return ackEntry; return ackEntry;
} catch (Exception e) { } catch (Exception e) {
Loggers.PUSH.error("VIPSRV-PUSH", "failed to prepare data: [" + data + "] to client: [" + Loggers.PUSH.error("[NACOS-PUSH] failed to prepare data: {} to client: {}, error: {}",
client.getSocketAddr() + "]", e); data, client.getSocketAddr(), e);
} }
return null; return null;
@ -233,7 +233,7 @@ public class PushService {
} }
Receiver.AckEntry ackEntry; Receiver.AckEntry ackEntry;
Loggers.PUSH.debug("push dom: " + dom + " to cleint: " + client.toString()); Loggers.PUSH.debug("push dom: {} to client: {}", dom, client.toString());
String key = getPushCacheKey(dom, client.getIp(), client.getAgent()); String key = getPushCacheKey(dom, client.getIp(), client.getAgent());
byte[] compressData = null; byte[] compressData = null;
Map<String, Object> data = null; Map<String, Object> data = null;
@ -242,7 +242,7 @@ public class PushService {
compressData = (byte[]) (pair.getValue0()); compressData = (byte[]) (pair.getValue0());
data = (Map<String, Object>) pair.getValue1(); data = (Map<String, Object>) pair.getValue1();
Loggers.PUSH.debug("PUSH-CACHE", "cache hit: " + dom + ":" + client.getAddrStr()); Loggers.PUSH.debug("[PUSH-CACHE] cache hit: {}:{}", dom, client.getAddrStr());
} }
if (compressData != null) { if (compressData != null) {
@ -254,14 +254,13 @@ public class PushService {
} }
} }
Loggers.PUSH.info("dom: " + client.getDom() + " changed, schedule push for: " Loggers.PUSH.info("dom: {} changed, schedule push for: {}, agent: {}, key: {}",
+ client.getAddrStr() + ", agent: " + client.getAgent() + ", key: " client.getDom(), client.getAddrStr(), client.getAgent(), (ackEntry == null ? null : ackEntry.key));
+ (ackEntry == null ? null : ackEntry.key));
udpPush(ackEntry); udpPush(ackEntry);
} }
} catch (Exception e) { } catch (Exception e) {
Loggers.PUSH.error("VIPSRV-PUSH", "failed to push dom: " + dom + " to cleint", e); Loggers.PUSH.error("[NACOS-PUSH] failed to push dom: {} to client, error: {}", dom, e);
} finally { } finally {
futureMap.remove(UtilsAndCommons.assembleFullServiceName(namespaceId, dom)); futureMap.remove(UtilsAndCommons.assembleFullServiceName(namespaceId, dom));
@ -467,7 +466,7 @@ public class PushService {
private static Receiver.AckEntry prepareAckEntry(PushClient client, Map<String, Object> data, long lastRefTime) { private static Receiver.AckEntry prepareAckEntry(PushClient client, Map<String, Object> data, long lastRefTime) {
if (MapUtils.isEmpty(data)) { if (MapUtils.isEmpty(data)) {
Loggers.PUSH.error("VIPSRV-PUSH", "pushing empty data for client is not allowed: " + client); Loggers.PUSH.error("[NACOS-PUSH] pushing empty data for client is not allowed: {}", client);
return null; return null;
} }
@ -493,20 +492,20 @@ public class PushService {
return ackEntry; return ackEntry;
} catch (Exception e) { } catch (Exception e) {
Loggers.PUSH.error("VIPSRV-PUSH", "failed to prepare data: [" + data + "] to client: [" + Loggers.PUSH.error("[NACOS-PUSH] failed to prepare data: {} to client: {}, error: {}",
client.getSocketAddr() + "]", e); data, client.getSocketAddr(), e);
return null; return null;
} }
} }
private static Receiver.AckEntry udpPush(Receiver.AckEntry ackEntry) { private static Receiver.AckEntry udpPush(Receiver.AckEntry ackEntry) {
if (ackEntry == null) { if (ackEntry == null) {
Loggers.PUSH.error("VIPSRV-PUSH", "ackEntry is null "); Loggers.PUSH.error("[NACOS-PUSH] ackEntry is null.");
return null; return null;
} }
if (ackEntry.getRetryTimes() > MAX_RETRY_TIMES) { if (ackEntry.getRetryTimes() > MAX_RETRY_TIMES) {
Loggers.PUSH.warn("max re-push times reached, retry times " + ackEntry.retryTimes + ", key: " + ackEntry.key); Loggers.PUSH.warn("max re-push times reached, retry times {}, key: {}", ackEntry.retryTimes, ackEntry.key);
ackMap.remove(ackEntry.key); ackMap.remove(ackEntry.key);
failedPush += 1; failedPush += 1;
return ackEntry; return ackEntry;
@ -529,8 +528,8 @@ public class PushService {
return ackEntry; return ackEntry;
} catch (Exception e) { } catch (Exception e) {
Loggers.PUSH.error("VIPSRV-PUSH", "failed to push data: [" + ackEntry.data + "] to client: [" + Loggers.PUSH.error("[NACOS-PUSH] failed to push data: {} to client: {}, error: {}",
ackEntry.origin.getAddress().getHostAddress() + "]", e); ackEntry.data, ackEntry.origin.getAddress().getHostAddress(), e);
ackMap.remove(ackEntry.key); ackMap.remove(ackEntry.key);
failedPush += 1; failedPush += 1;
@ -576,8 +575,7 @@ public class PushService {
int port = socketAddress.getPort(); int port = socketAddress.getPort();
if (System.nanoTime() - ackPacket.lastRefTime > ACK_TIMEOUT_NANOS) { if (System.nanoTime() - ackPacket.lastRefTime > ACK_TIMEOUT_NANOS) {
Loggers.PUSH.warn("ack takes too long from" + packet.getSocketAddress() Loggers.PUSH.warn("ack takes too long from {} ack json: {}",packet.getSocketAddress(), json);
+ " ack json: " + json);
} }
String ackKey = getACKKey(ip, port, ackPacket.lastRefTime); String ackKey = getACKKey(ip, port, ackPacket.lastRefTime);
@ -589,16 +587,15 @@ public class PushService {
long pushCost = System.currentTimeMillis() - udpSendTimeMap.get(ackKey); long pushCost = System.currentTimeMillis() - udpSendTimeMap.get(ackKey);
Loggers.PUSH.info("received ack: " + json + " from: " + ip Loggers.PUSH.info("received ack: {} from: {}:, cost: {} ms, unacked: {}, total push: {}",
+ ":" + port + ", cost: " + pushCost + "ms" + ", unacked: " + ackMap.size() + json, ip, port, pushCost, ackMap.size(), totalPush);
",total push: " + totalPush);
pushCostMap.put(ackKey, pushCost); pushCostMap.put(ackKey, pushCost);
udpSendTimeMap.remove(ackKey); udpSendTimeMap.remove(ackKey);
} catch (Throwable e) { } catch (Throwable e) {
Loggers.PUSH.error("VIPSRV-PUSH", "error while receiving ack data", e); Loggers.PUSH.error("[NACOS-PUSH] error while receiving ack data", e);
} }
} }
} }
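The udpPush hunk above caps re-push attempts per ack entry; a hedged sketch of that bookkeeping, with AckEntry reduced to the two fields the guard needs and the actual socket send stubbed out:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class RePushGuardDemo {

    private static final int MAX_RETRY_TIMES = 1;
    private static final ConcurrentMap<String, AckEntry> ackMap = new ConcurrentHashMap<>();
    private static volatile int failedPush = 0;

    static class AckEntry {
        String key;
        int retryTimes;
    }

    static AckEntry udpPush(AckEntry ackEntry) {
        if (ackEntry == null) {
            return null;
        }
        if (ackEntry.retryTimes > MAX_RETRY_TIMES) {
            // give up on this client: drop the pending entry and count the failure
            ackMap.remove(ackEntry.key);
            failedPush += 1;
            return ackEntry;
        }
        ackEntry.retryTimes++;
        ackMap.put(ackEntry.key, ackEntry);
        // ... send the UDP packet and schedule an ack-timeout recheck here ...
        return ackEntry;
    }
}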

View File

@ -140,7 +140,7 @@ public class PeerSet {
if (!Objects.equals(leader, peer)) { if (!Objects.equals(leader, peer)) {
leader = peer; leader = peer;
Loggers.RAFT.info(leader.ip + " has become the LEADER"); Loggers.RAFT.info("{} has become the LEADER", leader.ip);
} }
} }
@ -150,7 +150,8 @@ public class PeerSet {
public RaftPeer makeLeader(RaftPeer candidate) { public RaftPeer makeLeader(RaftPeer candidate) {
if (!Objects.equals(leader, candidate)) { if (!Objects.equals(leader, candidate)) {
leader = candidate; leader = candidate;
Loggers.RAFT.info(leader.ip + " has become the LEADER" + ",local :" + JSON.toJSONString(local()) + ", leader: " + JSON.toJSONString(leader)); Loggers.RAFT.info("{} has become the LEADER, local: {}, leader: {}",
leader.ip, JSON.toJSONString(local()), JSON.toJSONString(leader));
} }
for (final RaftPeer peer : peers.values()) { for (final RaftPeer peer : peers.values()) {
@ -162,7 +163,8 @@ public class PeerSet {
@Override @Override
public Integer onCompleted(Response response) throws Exception { public Integer onCompleted(Response response) throws Exception {
if (response.getStatusCode() != HttpURLConnection.HTTP_OK) { if (response.getStatusCode() != HttpURLConnection.HTTP_OK) {
Loggers.RAFT.error("VIPSRV-RAFT", "get peer failed: " + response.getResponseBody() + ", peer: " + peer.ip); Loggers.RAFT.error("[NACOS-RAFT] get peer failed: {}, peer: {}",
response.getResponseBody(), peer.ip);
peer.state = RaftPeer.State.FOLLOWER; peer.state = RaftPeer.State.FOLLOWER;
return 1; return 1;
} }
@ -174,7 +176,7 @@ public class PeerSet {
}); });
} catch (Exception e) { } catch (Exception e) {
peer.state = RaftPeer.State.FOLLOWER; peer.state = RaftPeer.State.FOLLOWER;
Loggers.RAFT.error("VIPSRV-RAFT", "error while getting peer from peer: " + peer.ip); Loggers.RAFT.error("[NACOS-RAFT] error while getting peer from peer: {}", peer.ip);
} }
} }
} }
@ -186,7 +188,7 @@ public class PeerSet {
RaftPeer peer = peers.get(NetUtils.localServer()); RaftPeer peer = peers.get(NetUtils.localServer());
if (peer == null) { if (peer == null) {
throw new IllegalStateException("unable to find local peer: " + NetUtils.localServer() + ", all peers: " throw new IllegalStateException("unable to find local peer: " + NetUtils.localServer() + ", all peers: "
+ Arrays.toString(peers.keySet().toArray())); + Arrays.toString(peers.keySet().toArray()));
} }
return peer; return peer;
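A small point behind the makeLeader hunks: Objects.equals is null-safe, so the change detection also works before the first election, while leader is still null. A minimal sketch with the peer reduced to a String:

import java.util.Objects;

public class LeaderChangeDemo {

    private String leader;   // null until the first election

    public void makeLeader(String candidate) {
        // null-safe comparison: fires exactly once per actual change of leadership
        if (!Objects.equals(leader, candidate)) {
            leader = candidate;
            System.out.println(leader + " has become the LEADER");
        }
    }

    public static void main(String[] args) {
        LeaderChangeDemo peers = new LeaderChangeDemo();
        peers.makeLeader("10.0.0.1:8848");   // prints
        peers.makeLeader("10.0.0.1:8848");   // silent, nothing changed
    }
}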

View File

@ -515,15 +515,13 @@ public class RaftCore {
@Override @Override
public Integer onCompleted(Response response) throws Exception { public Integer onCompleted(Response response) throws Exception {
if (response.getStatusCode() != HttpURLConnection.HTTP_OK) { if (response.getStatusCode() != HttpURLConnection.HTTP_OK) {
Loggers.RAFT.error("NACOS-RAFT {}", "vote failed: " Loggers.RAFT.error("NACOS-RAFT vote failed: {}, url: {}", response.getResponseBody(), url);
, response.getResponseBody() + " url:" + url);
return 1; return 1;
} }
RaftPeer peer = JSON.parseObject(response.getResponseBody(), RaftPeer.class); RaftPeer peer = JSON.parseObject(response.getResponseBody(), RaftPeer.class);
Loggers.RAFT.info("received approve from peer: " + JSON.toJSONString(peer)); Loggers.RAFT.info("received approve from peer: {}", JSON.toJSONString(peer));
peers.decideLeader(peer); peers.decideLeader(peer);
@ -531,7 +529,7 @@ public class RaftCore {
} }
}); });
} catch (Exception e) { } catch (Exception e) {
Loggers.RAFT.warn("error while sending vote to server:" + server); Loggers.RAFT.warn("error while sending vote to server: {}", server);
} }
} }
} }
@ -564,7 +562,7 @@ public class RaftCore {
local.voteFor = remote.ip; local.voteFor = remote.ip;
local.term.set(remote.term.get()); local.term.set(remote.term.get());
Loggers.RAFT.info("vote " + remote.ip + " as leader, term:" + remote.term); Loggers.RAFT.info("vote {} as leader, term: {}", remote.ip, remote.term);
return local; return local;
} }
@ -584,7 +582,7 @@ public class RaftCore {
sendBeat(); sendBeat();
} catch (Exception e) { } catch (Exception e) {
Loggers.RAFT.warn("RAFT", "error while sending beat", e); Loggers.RAFT.warn("[RAFT] error while sending beat {}", e);
} }
} }
@ -595,7 +593,7 @@ public class RaftCore {
return; return;
} }
Loggers.RAFT.info("[RAFT] send beat with " + datums.size() + " keys."); Loggers.RAFT.info("[RAFT] send beat with {} keys.", datums.size());
local.resetLeaderDue(); local.resetLeaderDue();
@ -606,7 +604,7 @@ public class RaftCore {
JSONArray array = new JSONArray(); JSONArray array = new JSONArray();
if (Switch.isSendBeatOnly()) { if (Switch.isSendBeatOnly()) {
Loggers.RAFT.info("[SEND-BEAT-ONLY] " + String.valueOf(Switch.isSendBeatOnly())); Loggers.RAFT.info("[SEND-BEAT-ONLY] {}", String.valueOf(Switch.isSendBeatOnly()));
} }
if (!Switch.isSendBeatOnly()) { if (!Switch.isSendBeatOnly()) {
@ -650,7 +648,8 @@ public class RaftCore {
byte[] compressedBytes = out.toByteArray(); byte[] compressedBytes = out.toByteArray();
String compressedContent = new String(compressedBytes, "UTF-8"); String compressedContent = new String(compressedBytes, "UTF-8");
Loggers.RAFT.info("raw beat data size: " + content.length() + ", size of compressed data: " + compressedContent.length()); Loggers.RAFT.info("raw beat data size: {}, size of compressed data: {}",
content.length(), compressedContent.length());
for (final String server : peers.allServersWithoutMySelf()) { for (final String server : peers.allServersWithoutMySelf()) {
try { try {
@ -660,22 +659,23 @@ public class RaftCore {
@Override @Override
public Integer onCompleted(Response response) throws Exception { public Integer onCompleted(Response response) throws Exception {
if (response.getStatusCode() != HttpURLConnection.HTTP_OK) { if (response.getStatusCode() != HttpURLConnection.HTTP_OK) {
Loggers.RAFT.error("NACOS-RAFT {}", "beat failed: " + response.getResponseBody() + ", peer: " + server); Loggers.RAFT.error("NACOS-RAFT beat failed: {}, peer: {}",
response.getResponseBody(), server);
return 1; return 1;
} }
peers.update(JSON.parseObject(response.getResponseBody(), RaftPeer.class)); peers.update(JSON.parseObject(response.getResponseBody(), RaftPeer.class));
Loggers.RAFT.info("receive beat response from: " + url); Loggers.RAFT.info("receive beat response from: {}", url);
return 0; return 0;
} }
@Override @Override
public void onThrowable(Throwable t) { public void onThrowable(Throwable t) {
Loggers.RAFT.error("NACOS-RAFT {} {}", "error while sending heart-beat to peer: " + server, t); Loggers.RAFT.error("NACOS-RAFT error while sending heart-beat to peer: {} {}", server, t);
} }
}); });
} catch (Exception e) { } catch (Exception e) {
Loggers.RAFT.error("VIPSRV {} {}", "error while sending heart-beat to peer: " + server, e); Loggers.RAFT.error("VIPSRV error while sending heart-beat to peer: {} {}", server, e);
} }
} }
@ -692,20 +692,21 @@ public class RaftCore {
remote.voteFor = beat.getJSONObject("peer").getString("voteFor"); remote.voteFor = beat.getJSONObject("peer").getString("voteFor");
if (remote.state != RaftPeer.State.LEADER) { if (remote.state != RaftPeer.State.LEADER) {
Loggers.RAFT.info("[RAFT] invalid state from master, state=" + remote.state + ", remote peer: " + JSON.toJSONString(remote)); Loggers.RAFT.info("[RAFT] invalid state from master, state: {}, remote peer: {}",
throw new IllegalArgumentException("invalid state from master, state=" + remote.state); remote.state, JSON.toJSONString(remote));
throw new IllegalArgumentException("invalid state from master, state: " + remote.state);
} }
if (local.term.get() > remote.term.get()) { if (local.term.get() > remote.term.get()) {
Loggers.RAFT.info("[RAFT] out of date beat, beat-from-term: " + remote.term.get() Loggers.RAFT.info("[RAFT] out of date beat, beat-from-term: {}, beat-to-term: {}, remote peer: {}, and leaderDueMs: {}"
+ ", beat-to-term: " + local.term.get() + ", remote peer: " + JSON.toJSONString(remote) + ", and leaderDueMs: " + local.leaderDueMs); , remote.term.get(), local.term.get(), JSON.toJSONString(remote), local.leaderDueMs);
throw new IllegalArgumentException("out of date beat, beat-from-term: " + remote.term.get() throw new IllegalArgumentException("out of date beat, beat-from-term: " + remote.term.get()
+ ", beat-to-term: " + local.term.get()); + ", beat-to-term: " + local.term.get());
} }
if (local.state != RaftPeer.State.FOLLOWER) { if (local.state != RaftPeer.State.FOLLOWER) {
Loggers.RAFT.info("[RAFT] make remote as leader " + ", remote peer: " + JSON.toJSONString(remote)); Loggers.RAFT.info("[RAFT] make remote as leader, remote peer: {}", JSON.toJSONString(remote));
// mk follower // mk follower
local.state = RaftPeer.State.FOLLOWER; local.state = RaftPeer.State.FOLLOWER;
local.voteFor = remote.ip; local.voteFor = remote.ip;
@ -727,8 +728,8 @@ public class RaftCore {
List<String> batch = new ArrayList<String>(); List<String> batch = new ArrayList<String>();
if (!Switch.isSendBeatOnly()) { if (!Switch.isSendBeatOnly()) {
int processedCount = 0; int processedCount = 0;
Loggers.RAFT.info("[RAFT] received beat with " + beatDatums.size() + " keys, RaftCore.datums' size is " Loggers.RAFT.info("[RAFT] received beat with {} keys, RaftCore.datums' size is {}, remote server: {}, term: {}, local term: {}",
+ RaftCore.datums.size() + ", remote server: " + remote.ip + ", term: " + remote.term + ", local term: " + local.term); beatDatums.size(), RaftCore.datums.size(), remote.ip, remote.term, local.term);
for (Object object : beatDatums) { for (Object object : beatDatums) {
processedCount = processedCount + 1; processedCount = processedCount + 1;
@ -773,8 +774,8 @@ public class RaftCore {
continue; continue;
} }
Loggers.RAFT.info("get datums from leader: " + getLeader().ip + " , batch size is " + batch.size() + ", processedCount is " + processedCount Loggers.RAFT.info("get datums from leader: {}, batch size is {}, processedCount is {}, datums' size is {}, RaftCore.datums' size is {}"
+ ", datums' size is " + beatDatums.size() + ", RaftCore.datums' size is " + RaftCore.datums.size()); , getLeader().ip, batch.size(), processedCount, beatDatums.size(), RaftCore.datums.size());
// update datum entry // update datum entry
String url = buildURL(remote.ip, API_GET) + "?keys=" + URLEncoder.encode(keys, "UTF-8"); String url = buildURL(remote.ip, API_GET) + "?keys=" + URLEncoder.encode(keys, "UTF-8");
@ -795,8 +796,8 @@ public class RaftCore {
Datum oldDatum = RaftCore.getDatum(datum.key); Datum oldDatum = RaftCore.getDatum(datum.key);
if (oldDatum != null && datum.timestamp.get() <= oldDatum.timestamp.get()) { if (oldDatum != null && datum.timestamp.get() <= oldDatum.timestamp.get()) {
Loggers.RAFT.info("[NACOS-RAFT] timestamp is smaller than that of mine, key: " + datum.key Loggers.RAFT.info("[NACOS-RAFT] timestamp is smaller than that of mine, key: {}, remote: {}, local: {}",
+ ",remote: " + datum.timestamp + ", local: " + oldDatum.timestamp); datum.key, datum.timestamp, oldDatum.timestamp);
continue; continue;
} }
@ -819,12 +820,12 @@ public class RaftCore {
RaftStore.updateTerm(local.term.get()); RaftStore.updateTerm(local.term.get());
} }
Loggers.RAFT.info("data updated" + ", key=" + datum.key Loggers.RAFT.info("data updated, key: {}, timestamp: {}, from {}, local term: {}",
+ ", timestamp=" + datum.timestamp + ",from " + JSON.toJSONString(remote) + ", local term: " + local.term); datum.key, datum.timestamp, JSON.toJSONString(remote), local.term);
notifier.addTask(datum, Notifier.ApplyAction.CHANGE); notifier.addTask(datum, Notifier.ApplyAction.CHANGE);
} catch (Throwable e) { } catch (Throwable e) {
Loggers.RAFT.error("RAFT-BEAT", "failed to sync datum from leader, key: " + datum.key, e); Loggers.RAFT.error("[RAFT-BEAT] failed to sync datum from leader, key: {} {}", datum.key, e);
} finally { } finally {
OPERATE_LOCK.unlock(); OPERATE_LOCK.unlock();
} }
@ -837,7 +838,7 @@ public class RaftCore {
batch.clear(); batch.clear();
} catch (Exception e) { } catch (Exception e) {
Loggers.RAFT.error("NACOS-RAFT {}", "failed to handle beat entry, key=" + datumKey); Loggers.RAFT.error("[NACOS-RAFT] failed to handle beat entry, key: {}", datumKey);
} }
} }
@ -853,7 +854,7 @@ public class RaftCore {
try { try {
deleteDatum(deadKey); deleteDatum(deadKey);
} catch (Exception e) { } catch (Exception e) {
Loggers.RAFT.error("NACOS-RAFT {}", "failed to remove entry, key=" + deadKey, e); Loggers.RAFT.error("[NACOS-RAFT] failed to remove entry, key={} {}", deadKey, e);
} }
} }
@ -884,13 +885,13 @@ public class RaftCore {
List<String> newServers = (List<String>) CollectionUtils.subtract(servers, oldServers); List<String> newServers = (List<String>) CollectionUtils.subtract(servers, oldServers);
if (!CollectionUtils.isEmpty(newServers)) { if (!CollectionUtils.isEmpty(newServers)) {
peers.add(newServers); peers.add(newServers);
Loggers.RAFT.info("server list is updated, new (" + newServers.size() + ") servers: " + newServers); Loggers.RAFT.info("server list is updated, new: {} servers: {}", newServers.size(), newServers);
} }
List<String> deadServers = (List<String>) CollectionUtils.subtract(oldServers, servers); List<String> deadServers = (List<String>) CollectionUtils.subtract(oldServers, servers);
if (!CollectionUtils.isEmpty(deadServers)) { if (!CollectionUtils.isEmpty(deadServers)) {
peers.remove(deadServers); peers.remove(deadServers);
Loggers.RAFT.info("server list is updated, dead (" + deadServers.size() + ") servers: " + deadServers); Loggers.RAFT.info("server list is updated, dead: {}, servers: {}", deadServers.size(), deadServers);
} }
} catch (Exception e) { } catch (Exception e) {
Loggers.RAFT.info("error while updating server list.", e); Loggers.RAFT.info("error while updating server list.", e);
@ -907,18 +908,18 @@ public class RaftCore {
for (RaftListener listener1 : listeners) { for (RaftListener listener1 : listeners) {
if (listener1 instanceof VirtualClusterDomain) { if (listener1 instanceof VirtualClusterDomain) {
Loggers.RAFT.debug("listener in listeners: " + ((VirtualClusterDomain) listener1).getName()); Loggers.RAFT.debug("listener in listeners: {}", ((VirtualClusterDomain) listener1).getName());
} }
} }
if (listeners.contains(listener)) { if (listeners.contains(listener)) {
if (listener instanceof VirtualClusterDomain) { if (listener instanceof VirtualClusterDomain) {
Loggers.RAFT.info("add listener: " + ((VirtualClusterDomain) listener).getName()); Loggers.RAFT.info("add listener: {}", ((VirtualClusterDomain) listener).getName());
} else { } else {
Loggers.RAFT.info("add listener for switch or domain meta. "); Loggers.RAFT.info("add listener for switch or domain meta. ");
} }
} else { } else {
Loggers.RAFT.error("NACOS-RAFT {}", "faild to add listener: " + JSON.toJSONString(listener)); Loggers.RAFT.error("[NACOS-RAFT] faild to add listener: {}", JSON.toJSONString(listener));
} }
// if data present, notify immediately // if data present, notify immediately
for (Datum datum : datums.values()) { for (Datum datum : datums.values()) {
@ -929,7 +930,7 @@ public class RaftCore {
try { try {
listener.onChange(datum.key, datum.value); listener.onChange(datum.key, datum.value);
} catch (Exception e) { } catch (Exception e) {
Loggers.RAFT.error("NACOS-RAFT {}", "failed to notify listener", e); Loggers.RAFT.error("NACOS-RAFT failed to notify listener", e);
} }
} }
} }
@ -1005,7 +1006,7 @@ public class RaftCore {
RaftStore.delete(deleted); RaftStore.delete(deleted);
} }
notifier.addTask(deleted, Notifier.ApplyAction.DELETE); notifier.addTask(deleted, Notifier.ApplyAction.DELETE);
Loggers.RAFT.info("datum deleted, key=" + key); Loggers.RAFT.info("datum deleted, key: {}", key);
} }
} }
@ -1068,17 +1069,15 @@ public class RaftCore {
continue; continue;
} }
} catch (Throwable e) { } catch (Throwable e) {
Loggers.RAFT.error("NACOS-RAFT {}", "error while notifying listener of key: " Loggers.RAFT.error("[NACOS-RAFT] error while notifying listener of key: {} {}", datum.key, e);
+ datum.key, e);
} }
} }
if (Loggers.RAFT.isDebugEnabled()) { if (Loggers.RAFT.isDebugEnabled()) {
Loggers.RAFT.debug("NACOS-RAFT {}", "datum change notified" + Loggers.RAFT.debug("[NACOS-RAFT] datum change notified, key: {}, listener count: {}", datum.key, count);
", key: " + datum.key + "; listener count: " + count);
} }
} catch (Throwable e) { } catch (Throwable e) {
Loggers.RAFT.error("NACOS-RAFT {} {}", "Error while handling notifying task", e); Loggers.RAFT.error("[NACOS-RAFT] Error while handling notifying task", e);
} }
} }
} }
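The sendBeat hunk logs raw vs. compressed payload sizes; a hedged sketch of that measurement with plain java.util.zip, comparing byte counts before the payload is shipped (the beat JSON here is a stand-in):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPOutputStream;

public class BeatCompressionDemo {

    public static void main(String[] args) throws IOException {
        String content = "{\"peer\":{\"ip\":\"10.0.0.1:8848\"},\"datums\":[]}";

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (GZIPOutputStream gzip = new GZIPOutputStream(out)) {
            gzip.write(content.getBytes(StandardCharsets.UTF_8));
        }
        byte[] compressedBytes = out.toByteArray();

        // the two sizes compared in the log line above
        System.out.printf("raw beat data size: %d, size of compressed data: %d%n",
            content.length(), compressedBytes.length);
    }
}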

View File

@ -78,7 +78,7 @@ public class RaftStore {
RaftCore.setTerm(NumberUtils.toLong(RaftStore.meta.getProperty("term"), 0L)); RaftCore.setTerm(NumberUtils.toLong(RaftStore.meta.getProperty("term"), 0L));
} }
Loggers.RAFT.info("finish loading all datums, size: " + RaftCore.datumSize() + " cost " + (System.currentTimeMillis() - start) + "ms."); Loggers.RAFT.info("finish loading all datums, size: {} cost {} ms.", RaftCore.datumSize(), (System.currentTimeMillis() - start));
} }
public synchronized static void load(String key) throws Exception { public synchronized static void load(String key) throws Exception {
@ -86,7 +86,7 @@ public class RaftStore {
// load data // load data
for (File cache : listCaches()) { for (File cache : listCaches()) {
if (!cache.isFile()) { if (!cache.isFile()) {
Loggers.RAFT.warn("warning: encountered directory in cache dir: " + cache.getAbsolutePath()); Loggers.RAFT.warn("warning: encountered directory in cache dir: {}", cache.getAbsolutePath());
} }
if (!StringUtils.equals(decodeFileName(cache.getName()), key)) { if (!StringUtils.equals(decodeFileName(cache.getName()), key)) {
@ -95,7 +95,8 @@ public class RaftStore {
readDatum(cache); readDatum(cache);
} }
Loggers.RAFT.info("finish loading datum, key: " + key + " cost " + (System.currentTimeMillis() - start) + "ms."); Loggers.RAFT.info("finish loading datum, key: {} cost {} ms.",
key, (System.currentTimeMillis() - start));
} }
public synchronized static void readDatum(File file) throws IOException { public synchronized static void readDatum(File file) throws IOException {
@ -115,7 +116,7 @@ public class RaftStore {
Datum datum = JSON.parseObject(json, Datum.class); Datum datum = JSON.parseObject(json, Datum.class);
RaftCore.addDatum(datum); RaftCore.addDatum(datum);
} catch (Exception e) { } catch (Exception e) {
Loggers.RAFT.warn("waning: failed to deserialize key: " + file.getName()); Loggers.RAFT.warn("waning: failed to deserialize key: {}", file.getName());
throw e; throw e;
} finally { } finally {
if (fc != null) { if (fc != null) {
@ -187,7 +188,7 @@ public class RaftStore {
public static void delete(Datum datum) { public static void delete(Datum datum) {
File cacheFile = new File(CACHE_DIR + File.separator + encodeFileName(datum.key)); File cacheFile = new File(CACHE_DIR + File.separator + encodeFileName(datum.key));
if (!cacheFile.delete()) { if (!cacheFile.delete()) {
Loggers.RAFT.error("RAFT-DELETE", "failed to delete datum: " + datum.key + ", value: " + datum.value); Loggers.RAFT.error("[RAFT-DELETE] failed to delete datum: {}, value: {}", datum.key, datum.value);
throw new IllegalStateException("failed to delete datum: " + datum.key); throw new IllegalStateException("failed to delete datum: " + datum.key);
} }
} }
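On the delete hunk: File.delete() signals failure by returning false rather than throwing, which is why the code checks the result and escalates to IllegalStateException itself. A minimal sketch (directory and key are hypothetical):

import java.io.File;

public class CacheDeleteDemo {

    public static void delete(File cacheDir, String encodedKey) {
        File cacheFile = new File(cacheDir, encodedKey);
        // delete() returns false on failure instead of throwing, so the result
        // must be checked explicitly and turned into an error by the caller
        if (cacheFile.exists() && !cacheFile.delete()) {
            throw new IllegalStateException("failed to delete datum: " + encodedKey);
        }
    }

    public static void main(String[] args) {
        delete(new File(System.getProperty("java.io.tmpdir")), "com.alibaba.nacos.demo.key");
    }
}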

View File

@ -256,7 +256,7 @@ public class ApiCommands {
result.put("ips", ipArray); result.put("ips", ipArray);
} catch (Throwable e) { } catch (Throwable e) {
Loggers.SRV_LOG.warn("VIPSRV-IP4DOM", "failed to call ip4Dom, caused " + e.getMessage()); Loggers.SRV_LOG.warn("[NACOS-IP4DOM] failed to call ip4Dom, caused ", e);
throw new IllegalArgumentException(e); throw new IllegalArgumentException(e);
} }
@ -297,7 +297,9 @@ public class ApiCommands {
clusterName = UtilsAndCommons.DEFAULT_CLUSTER_NAME; clusterName = UtilsAndCommons.DEFAULT_CLUSTER_NAME;
} }
Loggers.DEBUG_LOG.debug("[CLIENT-BEAT] full arguments: beat: " + clientBeat + ", serviceName:" + dom); if (Loggers.DEBUG_LOG.isDebugEnabled()) {
Loggers.DEBUG_LOG.debug("[CLIENT-BEAT] full arguments: beat: {}, serviceName: {}", clientBeat, dom);
}
VirtualClusterDomain virtualClusterDomain = (VirtualClusterDomain) domainsManager.getDomain(namespaceId, dom); VirtualClusterDomain virtualClusterDomain = (VirtualClusterDomain) domainsManager.getDomain(namespaceId, dom);
Map<String, String[]> stringMap = new HashMap<>(16); Map<String, String[]> stringMap = new HashMap<>(16);
@ -310,7 +312,7 @@ public class ApiCommands {
//if domain does not exist, register it. //if domain does not exist, register it.
if (virtualClusterDomain == null) { if (virtualClusterDomain == null) {
regDom(OverrideParameterRequestWrapper.buildRequest(request, stringMap)); regDom(OverrideParameterRequestWrapper.buildRequest(request, stringMap));
Loggers.SRV_LOG.warn("dom not found, register it, dom:" + dom); Loggers.SRV_LOG.warn("dom not found, register it, dom: {}", dom);
} }
virtualClusterDomain = (VirtualClusterDomain) domainsManager.getDomain(namespaceId, dom); virtualClusterDomain = (VirtualClusterDomain) domainsManager.getDomain(namespaceId, dom);
@ -336,12 +338,12 @@ public class ApiCommands {
stringMap.put("json", Arrays.asList("true").toArray(new String[1])); stringMap.put("json", Arrays.asList("true").toArray(new String[1]));
stringMap.put("dom", Arrays.asList(dom).toArray(new String[1])); stringMap.put("dom", Arrays.asList(dom).toArray(new String[1]));
addIP4Dom(OverrideParameterRequestWrapper.buildRequest(request, stringMap)); addIP4Dom(OverrideParameterRequestWrapper.buildRequest(request, stringMap));
Loggers.SRV_LOG.warn("ip not found, register it, dom:" + dom + ", ip:" + ipAddress); Loggers.SRV_LOG.warn("ip not found, register it, dom: {}, ip: {}", dom, ipAddress);
} }
if (!DistroMapper.responsible(dom)) { if (!DistroMapper.responsible(dom)) {
String server = DistroMapper.mapSrv(dom); String server = DistroMapper.mapSrv(dom);
Loggers.EVT_LOG.info("I'm not responsible for " + dom + ", proxy it to " + server); Loggers.EVT_LOG.info("I'm not responsible for {}, proxy it to {}", dom, server);
Map<String, String> proxyParams = new HashMap<>(16); Map<String, String> proxyParams = new HashMap<>(16);
for (Map.Entry<String, String[]> entry : request.getParameterMap().entrySet()) { for (Map.Entry<String, String[]> entry : request.getParameterMap().entrySet()) {
String key = entry.getKey(); String key = entry.getKey();
@ -399,7 +401,7 @@ public class ApiCommands {
String serviceMetadataJson = WebUtils.optional(request, "serviceMetadata", StringUtils.EMPTY); String serviceMetadataJson = WebUtils.optional(request, "serviceMetadata", StringUtils.EMPTY);
String clusterMetadataJson = WebUtils.optional(request, "clusterMetadata", StringUtils.EMPTY); String clusterMetadataJson = WebUtils.optional(request, "clusterMetadata", StringUtils.EMPTY);
Loggers.SRV_LOG.info("[RESET-WEIGHT] " + String.valueOf(resetWeight)); Loggers.SRV_LOG.info("[RESET-WEIGHT] {}", String.valueOf(resetWeight));
VirtualClusterDomain domObj = new VirtualClusterDomain(); VirtualClusterDomain domObj = new VirtualClusterDomain();
domObj.setName(dom); domObj.setName(dom);
@ -569,7 +571,7 @@ public class ApiCommands {
try { try {
regDom(request); regDom(request);
} catch (Exception e) { } catch (Exception e) {
Loggers.SRV_LOG.error("REG-SERIVCE", "register service failed, service:" + dom, e); Loggers.SRV_LOG.error("[REG-SERIVCE] register service failed, service:" + dom, e);
} }
} }
}); });
@ -590,7 +592,7 @@ public class ApiCommands {
} }
if (Loggers.SRV_LOG.isDebugEnabled()) { if (Loggers.SRV_LOG.isDebugEnabled()) {
Loggers.SRV_LOG.debug("reg-service {}", "add ip: " + dom + "|" + ipAddress.toJSON()); Loggers.SRV_LOG.debug("reg-service add ip: {}|{}", dom, ipAddress.toJSON());
} }
Map<String, String[]> stringMap = new HashMap<>(16); Map<String, String[]> stringMap = new HashMap<>(16);
@ -838,15 +840,15 @@ public class ApiCommands {
long term = Long.parseLong(WebUtils.required(request, "term")); long term = Long.parseLong(WebUtils.required(request, "term"));
if (!RaftCore.isLeader(clientIP)) { if (!RaftCore.isLeader(clientIP)) {
Loggers.RAFT.warn("peer(" + JSON.toJSONString(clientIP) + ") tried to publish " + Loggers.RAFT.warn("peer {} tried to publish data but wasn't leader, leader: {}",
"data but wasn't leader, leader: " + JSON.toJSONString(RaftCore.getLeader())); JSON.toJSONString(clientIP), JSON.toJSONString(RaftCore.getLeader()));
throw new IllegalStateException("peer(" + clientIP + ") tried to publish " + throw new IllegalStateException("peer(" + clientIP + ") tried to publish " +
"data but wasn't leader"); "data but wasn't leader");
} }
if (term < RaftCore.getPeerSet().local().term.get()) { if (term < RaftCore.getPeerSet().local().term.get()) {
Loggers.RAFT.warn("out of date publish, pub-term: " Loggers.RAFT.warn("out of date publish, pub-term: {}, cur-term: {}",
+ JSON.toJSONString(clientIP) + ", cur-term: " + JSON.toJSONString(RaftCore.getPeerSet().local())); JSON.toJSONString(clientIP), JSON.toJSONString(RaftCore.getPeerSet().local()));
throw new IllegalStateException("out of date publish, pub-term:" throw new IllegalStateException("out of date publish, pub-term:"
+ term + ", cur-term: " + RaftCore.getPeerSet().local().term.get()); + term + ", cur-term: " + RaftCore.getPeerSet().local().term.get());
} }
@ -912,7 +914,9 @@ public class ApiCommands {
proxyParams.put(entry.getKey(), entry.getValue()[0]); proxyParams.put(entry.getKey(), entry.getValue()[0]);
} }
Loggers.DEBUG_LOG.debug("[ADD-IP] full arguments:" + proxyParams); if (Loggers.DEBUG_LOG.isDebugEnabled()) {
Loggers.DEBUG_LOG.debug("[ADD-IP] full arguments: {}", proxyParams);
}
String ipListString = WebUtils.required(request, "ipList"); String ipListString = WebUtils.required(request, "ipList");
final List<String> ipList; final List<String> ipList;
@ -948,7 +952,7 @@ public class ApiCommands {
HttpClient.HttpResult result1 = HttpClient.httpPost(url, null, proxyParams); HttpClient.HttpResult result1 = HttpClient.httpPost(url, null, proxyParams);
if (result1.code != HttpURLConnection.HTTP_OK) { if (result1.code != HttpURLConnection.HTTP_OK) {
Loggers.SRV_LOG.warn("failed to add ip for dom, caused " + result1.content); Loggers.SRV_LOG.warn("failed to add ip for dom, caused {}", result1.content);
throw new IllegalArgumentException("failed to add ip for dom, caused " + result1.content); throw new IllegalArgumentException("failed to add ip for dom, caused " + result1.content);
} }
@ -1033,15 +1037,15 @@ public class ApiCommands {
} }
}); });
} catch (Exception e) { } catch (Exception e) {
Loggers.SRV_LOG.error("ADD-IP", "failed when publish to peer." + url, e); Loggers.SRV_LOG.error("[ADD-IP] failed when publish to peer. " + url, e);
} }
} }
}); });
} }
Loggers.EVT_LOG.info("{" + dom + "} {POS} {IP-ADD}" + " new: " Loggers.EVT_LOG.info("dom: {} {POS} {IP-ADD} new: {} operatorIP: {}",
+ Arrays.toString(ipList.toArray()) + " operatorIP: " dom, Arrays.toString(ipList.toArray()), WebUtils.optional(request, "clientIP", "unknown"));
+ WebUtils.optional(request, "clientIP", "unknown"));
} finally { } finally {
domainsManager.getDom2LockMap().get(UtilsAndCommons.assembleFullServiceName(namespaceId, dom)).unlock(); domainsManager.getDom2LockMap().get(UtilsAndCommons.assembleFullServiceName(namespaceId, dom)).unlock();
} }
@ -1083,7 +1087,7 @@ public class ApiCommands {
cacheMillis = Switch.getPushCacheMillis(dom); cacheMillis = Switch.getPushCacheMillis(dom);
} }
} catch (Exception e) { } catch (Exception e) {
Loggers.SRV_LOG.error("VIPSRV-API", "failed to added push client", e); Loggers.SRV_LOG.error("[NACOS-API] failed to added push client", e);
cacheMillis = Switch.getCacheMillis(dom); cacheMillis = Switch.getCacheMillis(dom);
} }
@ -1118,8 +1122,7 @@ public class ApiCommands {
if ((float) ipMap.get(Boolean.TRUE).size() / srvedIPs.size() <= threshold) { if ((float) ipMap.get(Boolean.TRUE).size() / srvedIPs.size() <= threshold) {
Loggers.SRV_LOG.warn("protect threshold reached, return all ips, " + Loggers.SRV_LOG.warn("protect threshold reached, return all ips, dom: {}", dom);
"dom: " + dom);
if (isCheck) { if (isCheck) {
result.put("reachProtectThreshold", true); result.put("reachProtectThreshold", true);
} }
@ -1241,9 +1244,8 @@ public class ApiCommands {
domainsManager.easyRemvIP4Dom(namespaceId, dom, ipObjList); domainsManager.easyRemvIP4Dom(namespaceId, dom, ipObjList);
Loggers.EVT_LOG.info("{" + dom + "} {POS} {IP-REMV}" + " dead: " Loggers.EVT_LOG.info("dom: {} {POS} {IP-REMV} dead: {}operator: {}",
+ Arrays.toString(ipList.toArray()) + " operator: " dom, Arrays.toString(ipList.toArray()), WebUtils.optional(request, "clientIP", "unknown"));
+ WebUtils.optional(request, "clientIP", "unknown"));
return "ok"; return "ok";
} }
@ -1316,7 +1318,7 @@ public class ApiCommands {
if (datum != null) { if (datum != null) {
switchDomain = JSON.parseObject(datum.value, SwitchDomain.class); switchDomain = JSON.parseObject(datum.value, SwitchDomain.class);
} else { } else {
Loggers.SRV_LOG.warn("datum: " + UtilsAndCommons.DOMAINS_DATA_ID + ".00-00---000-VIPSRV_SWITCH_DOMAIN-000---00-00 is null"); Loggers.SRV_LOG.warn("datum: {}.00-00---000-VIPSRV_SWITCH_DOMAIN-000---00-00 is null", UtilsAndCommons.DOMAINS_DATA_ID);
} }
if (SwitchEntry.BATCH.equals(entry)) { if (SwitchEntry.BATCH.equals(entry)) {
@ -1824,7 +1826,7 @@ public class ApiCommands {
cluster = getClusterFromJson(json); cluster = getClusterFromJson(json);
} catch (Exception e) { } catch (Exception e) {
Loggers.SRV_LOG.warn("ADD-CLUSTER", "failed to parse json, try old format."); Loggers.SRV_LOG.warn("[ADD-CLUSTER] failed to parse json, try old format.");
} }
} else { } else {
String cktype = WebUtils.optional(request, "cktype", "TCP"); String cktype = WebUtils.optional(request, "cktype", "TCP");
@ -1968,7 +1970,7 @@ public class ApiCommands {
sb.append(ip).append("\r\n"); sb.append(ip).append("\r\n");
} }
Loggers.SRV_LOG.info("[UPDATE-CLUSTER] new ips:" + sb.toString()); Loggers.SRV_LOG.info("[UPDATE-CLUSTER] new ips: {}", sb.toString());
writeClusterConf(sb.toString()); writeClusterConf(sb.toString());
return result; return result;
} }
@ -1979,7 +1981,7 @@ public class ApiCommands {
for (String ip : ips.split(ipSpliter)) { for (String ip : ips.split(ipSpliter)) {
sb.append(ip).append("\r\n"); sb.append(ip).append("\r\n");
} }
Loggers.SRV_LOG.info("[UPDATE-CLUSTER] new ips:" + sb.toString()); Loggers.SRV_LOG.info("[UPDATE-CLUSTER] new ips: {}", sb.toString());
writeClusterConf(sb.toString()); writeClusterConf(sb.toString());
return result; return result;
} }
@ -2112,7 +2114,7 @@ public class ApiCommands {
try { try {
DomainsManager.DomainChecksum checksums = JSON.parseObject(domsStatusString, DomainsManager.DomainChecksum.class); DomainsManager.DomainChecksum checksums = JSON.parseObject(domsStatusString, DomainsManager.DomainChecksum.class);
if (checksums == null) { if (checksums == null) {
Loggers.SRV_LOG.warn("DOMAIN-STATUS", "receive malformed data: " + null); Loggers.SRV_LOG.warn("[DOMAIN-STATUS] receive malformed data: null");
return "fail"; return "fail";
} }
@ -2131,12 +2133,15 @@ public class ApiCommands {
domain.recalculateChecksum(); domain.recalculateChecksum();
if (!checksum.equals(domain.getChecksum())) { if (!checksum.equals(domain.getChecksum())) {
Loggers.SRV_LOG.debug("checksum of " + dom + " is not consistent, remote: " + serverIP + ",checksum: " + checksum + ", local: " + domain.getChecksum()); if (Loggers.SRV_LOG.isDebugEnabled()) {
Loggers.SRV_LOG.debug("checksum of {} is not consistent, remote: {}, checksum: {}, local: {}",
dom, serverIP, checksum, domain.getChecksum());
}
domainsManager.addUpdatedDom2Queue(checksums.namespaceId, dom, serverIP, checksum); domainsManager.addUpdatedDom2Queue(checksums.namespaceId, dom, serverIP, checksum);
} }
} }
} catch (Exception e) { } catch (Exception e) {
Loggers.SRV_LOG.warn("DOMAIN-STATUS", "receive malformed data: " + domsStatusString, e); Loggers.SRV_LOG.warn("[DOMAIN-STATUS] receive malformed data: " + domsStatusString, e);
} }
return "ok"; return "ok";
@ -2151,8 +2156,8 @@ public class ApiCommands {
String port = WebUtils.required(request, "port"); String port = WebUtils.required(request, "port");
String state = WebUtils.optional(request, "state", StringUtils.EMPTY); String state = WebUtils.optional(request, "state", StringUtils.EMPTY);
Loggers.SRV_LOG.info("[CONTAINER_NOTFY] received notify event, type:" + type + ", domain:" + domain + Loggers.SRV_LOG.info("[CONTAINER_NOTFY] received notify event, type: {}, domain: {}, ip: {}, port: {}, state: {}",
", ip:" + ip + ", port:" + port + ", state:" + state); type, domain, ip, port, state);
return "ok"; return "ok";
} }
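The protect-threshold hunk above returns every instance once the healthy fraction drops to or below the threshold, trading accuracy for availability so the few remaining healthy nodes are not overwhelmed. A hedged sketch with instances reduced to strings:

import java.util.Arrays;
import java.util.List;

public class ProtectThresholdDemo {

    // returns the healthy subset normally, or all instances once the healthy
    // ratio falls to or below the threshold (protection mode)
    static List<String> select(List<String> healthy, List<String> all, float threshold) {
        if (all.isEmpty()) {
            return all;
        }
        if ((float) healthy.size() / all.size() <= threshold) {
            return all;
        }
        return healthy;
    }

    public static void main(String[] args) {
        List<String> all = Arrays.asList("10.0.0.1:8080", "10.0.0.2:8080", "10.0.0.3:8080", "10.0.0.4:8080");
        List<String> healthy = Arrays.asList("10.0.0.1:8080");
        // 1/4 = 0.25 <= 0.25, so protection kicks in and all four are returned
        System.out.println(select(healthy, all, 0.25f));
    }
}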

View File

@ -66,7 +66,7 @@ public class DistroFilter implements Filter {
try { try {
resp.sendRedirect(url); resp.sendRedirect(url);
} catch (Exception ignore) { } catch (Exception ignore) {
Loggers.SRV_LOG.warn("DISTRO-FILTER", "request failed: " + url); Loggers.SRV_LOG.warn("[DISTRO-FILTER] request failed: " + url);
} }
return; return;
} }
@ -102,7 +102,7 @@ public class DistroFilter implements Filter {
try { try {
resp.sendRedirect(url); resp.sendRedirect(url);
} catch (Exception ignore) { } catch (Exception ignore) {
Loggers.SRV_LOG.warn("DISTRO-FILTER", "request failed: " + url); Loggers.SRV_LOG.warn("[DISTRO-FILTER] request failed: " + url);
} }
} }
} }

View File

@ -1,50 +0,0 @@
/*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.nacos.naming.web;
import com.alibaba.nacos.api.common.Constants;
import com.alibaba.nacos.core.utils.WebUtils;
import com.alibaba.nacos.naming.misc.UtilsAndCommons;
import org.apache.commons.lang3.StringUtils;
import javax.servlet.*;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
/**
* A filter to intercept tenant parameter.
*
* @author <a href="mailto:zpf.073@gmail.com">nkorange</a>
* @since 0.8.0
*/
public class NamespaceFilter implements Filter {
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
chain.doFilter(request, response);
}
@Override
public void destroy() {
}
}

View File

@ -51,18 +51,6 @@ public class NamingConfig {
return registration; return registration;
} }
@Bean
public FilterRegistrationBean tenantFilterRegistration() {
FilterRegistrationBean registration = new FilterRegistrationBean();
registration.setFilter(namespaceFilter());
registration.addUrlPatterns("/v1/ns/instance/*", "/v1/ns/service/*", "/v1/ns/cluster/*", "/v1/ns/health/*");
registration.setName("namespaceFilter");
registration.setOrder(4);
return registration;
}
@Bean @Bean
public Filter distroFilter() { public Filter distroFilter() {
return new DistroFilter(); return new DistroFilter();
@ -73,8 +61,4 @@ public class NamingConfig {
return new AuthFilter(); return new AuthFilter();
} }
@Bean
public Filter namespaceFilter() {
return new NamespaceFilter();
}
} }