forked from I2P_Developers/i2p.i2p
Stats: clean up addRateData() calls
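Every hunk below makes the same mechanical change: call sites that passed a literal 0 as the third (event duration) argument of StatManager.addRateData() switch to the two-argument form. As a point of reference only, a minimal sketch of what such a convenience overload is assumed to do (see net.i2p.stat.StatManager for the real code):

    // Hypothetical sketch, not part of this commit: the two-argument form is
    // assumed to behave exactly like passing 0 for the event duration.
    public void addRateData(String name, long data) {
        addRateData(name, data, 0);
    }

Under that assumption the rewritten call sites are behavior-preserving; only the noise of the trailing ", 0" goes away.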
@@ -209,9 +209,9 @@ public class JobQueue {
             }
         }
 
-        _context.statManager().addRateData("jobQueue.readyJobs", numReady, 0);
+        _context.statManager().addRateData("jobQueue.readyJobs", numReady);
         if (dropped) {
-            _context.statManager().addRateData("jobQueue.droppedJobs", 1, 0);
+            _context.statManager().addRateData("jobQueue.droppedJobs", 1);
             _log.logAlways(Log.WARN, "Dropping job due to overload! # ready jobs: "
                            + numReady + ": job = " + job);
         }
@@ -87,7 +87,7 @@ class JobQueueRunner implements Runnable {
 
         //_context.statManager().addRateData("jobQueue.jobRunnerInactive", betweenJobs, betweenJobs);
         _context.statManager().addRateData("jobQueue.jobRun", duration, duration);
-        _context.statManager().addRateData("jobQueue.jobLag", lag, 0);
+        _context.statManager().addRateData("jobQueue.jobLag", lag);
         _context.statManager().addRateData("jobQueue.jobWait", enqueuedTime, enqueuedTime);
 
         if (duration > 1000) {
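Note that in the JobQueueRunner hunk above only the jobQueue.jobLag call changes: jobQueue.jobRun and jobQueue.jobWait keep the three-argument form because their third argument carries a real event duration, not a literal 0. A hypothetical helper (assumed local variables lag and duration) showing the rule applied throughout this commit:

    // Sketch only: value-only samples move to the two-argument form,
    // samples with a meaningful event duration keep the long form.
    private void reportJobStats(long lag, long duration) {
        _context.statManager().addRateData("jobQueue.jobLag", lag);                 // was (..., lag, 0)
        _context.statManager().addRateData("jobQueue.jobRun", duration, duration);  // unchanged
    }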
@@ -42,7 +42,7 @@ public class MessageValidator {
         if (isDuplicate) {
             if (_log.shouldLog(Log.INFO))
                 _log.info("Rejecting message " + messageId + " because it is a duplicate", new Exception("Duplicate origin"));
-            _context.statManager().addRateData("router.duplicateMessageId", 1, 0);
+            _context.statManager().addRateData("router.duplicateMessageId", 1);
             return "duplicate";
         } else {
             if (_log.shouldLog(Log.DEBUG))
@@ -59,12 +59,12 @@ public class MessageValidator {
         if (now - (Router.CLOCK_FUDGE_FACTOR * 3 / 2) >= expiration) {
             if (_log.shouldLog(Log.INFO))
                 _log.info("Rejecting message because it expired " + (now-expiration) + "ms ago");
-            _context.statManager().addRateData("router.invalidMessageTime", (now-expiration), 0);
+            _context.statManager().addRateData("router.invalidMessageTime", (now-expiration));
             return "expired " + (now-expiration) + "ms ago";
         } else if (now + 4*Router.CLOCK_FUDGE_FACTOR < expiration) {
             if (_log.shouldLog(Log.INFO))
                 _log.info("Rejecting message because it will expire too far in the future (" + (expiration-now) + "ms)");
-            _context.statManager().addRateData("router.invalidMessageTime", (now-expiration), 0);
+            _context.statManager().addRateData("router.invalidMessageTime", (now-expiration));
             return "expire too far in the future (" + (expiration-now) + "ms)";
         }
         return null;
@@ -164,7 +164,7 @@ public class RouterClock extends Clock {
                 _context.statManager().createRequiredRateStat("clock.skew", "Clock step adjustment (ms)", "Clock", new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*60 });
                 _statCreated = true;
             }
-            _context.statManager().addRateData("clock.skew", delta, 0);
+            _context.statManager().addRateData("clock.skew", delta);
             _desiredOffset = offsetMs;
         } else {
             getLog().log(Log.INFO, "Initializing clock offset to " + offsetMs + "ms, Stratum " + stratum);
@@ -34,7 +34,7 @@ class RouterDoSThrottle extends RouterThrottleImpl {
             // same period, check for DoS
             int cnt = _currentLookupCount.incrementAndGet();
             if (cnt >= LOOKUP_THROTTLE_MAX) {
-                _context.statManager().addRateData("router.throttleNetDbDoS", cnt, 0);
+                _context.statManager().addRateData("router.throttleNetDbDoS", cnt);
                 int rand = _context.random().nextInt(cnt);
                 if (rand > LOOKUP_THROTTLE_MAX) {
                     return false;
@@ -190,7 +190,7 @@ class RouterThrottleImpl implements RouterThrottle {
                 if (_log.shouldLog(Log.WARN))
                     _log.warn("Probabalistically refusing tunnel request (avg=" + avg
                               + " current=" + numTunnels + ")");
-                _context.statManager().addRateData("router.throttleTunnelProbTooFast", (long)(numTunnels-avg), 0);
+                _context.statManager().addRateData("router.throttleTunnelProbTooFast", (long)(numTunnels-avg));
                 // hard to do {0} from here
                 //setTunnelStatus("Rejecting " + (100 - (int) probAccept*100) + "% of tunnels: High number of requests");
                 setTunnelStatus(_x("Rejecting most tunnels: High number of requests"));
@@ -243,7 +243,7 @@ class RouterThrottleImpl implements RouterThrottle {
             if (_log.shouldLog(Log.WARN))
                 _log.warn("Refusing tunnel request since we are already participating in "
                           + numTunnels + " (our max is " + maxTunnels + ")");
-            _context.statManager().addRateData("router.throttleTunnelMaxExceeded", numTunnels, 0);
+            _context.statManager().addRateData("router.throttleTunnelMaxExceeded", numTunnels);
             setTunnelStatus(_x("Rejecting tunnels: Limit reached"));
             return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
         }
@@ -264,7 +264,7 @@ class RouterThrottleImpl implements RouterThrottle {
         double bytesAllocated = messagesPerTunnel * numTunnels * PREPROCESSED_SIZE;
 
         if (!allowTunnel(bytesAllocated, numTunnels)) {
-            _context.statManager().addRateData("router.throttleTunnelBandwidthExceeded", (long)bytesAllocated, 0);
+            _context.statManager().addRateData("router.throttleTunnelBandwidthExceeded", (long)bytesAllocated);
             return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
         }
@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 4;
+    public final static long BUILD = 5;
 
     /** for example "-test" */
     public final static String EXTRA = "";
@@ -104,7 +104,7 @@ class RequestLeaseSetJob extends JobImpl {
             _runner.doSend(msg);
             getContext().jobQueue().addJob(new CheckLeaseRequestStatus());
         } catch (I2CPMessageException ime) {
-            getContext().statManager().addRateData("client.requestLeaseSetDropped", 1, 0);
+            getContext().statManager().addRateData("client.requestLeaseSetDropped", 1);
             _log.error("Error sending I2CP message requesting the lease set", ime);
             _requestState.setIsSuccessful(false);
             if (_requestState.getOnFailed() != null)
@@ -84,7 +84,7 @@ public class GarlicMessageReceiver {
             if (_log.shouldLog(Log.WARN))
                 _log.warn("CloveMessageParser failed to decrypt the message [" + message.getUniqueId()
                           + "]", new Exception("Decrypt garlic failed"));
-            _context.statManager().addRateData("crypto.garlic.decryptFail", 1, 0);
+            _context.statManager().addRateData("crypto.garlic.decryptFail", 1);
             _context.messageHistory().messageProcessingError(message.getUniqueId(),
                                                              message.getClass().getName(),
                                                              "Garlic could not be decrypted");
@@ -111,7 +111,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                 // Local leasesets are not handled here
                 if (_log.shouldLog(Log.INFO))
                     _log.info("We have the published LS " + _message.getSearchKey() + ", answering query");
-                getContext().statManager().addRateData("netDb.lookupsMatchedReceivedPublished", 1, 0);
+                getContext().statManager().addRateData("netDb.lookupsMatchedReceivedPublished", 1);
                 sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
             } else if (shouldPublishLocal && answerAllQueries()) {
                 // We are floodfill, and this is our local leaseset, and we publish it.
@@ -124,13 +124,13 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                     // It's in our keyspace, so give it to them
                     if (_log.shouldLog(Log.INFO))
                         _log.info("We have local LS " + _message.getSearchKey() + ", answering query, in our keyspace");
-                    getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1, 0);
+                    getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1);
                     sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
                 } else {
                     // Lie, pretend we don't have it
                     if (_log.shouldLog(Log.INFO))
                         _log.info("We have local LS " + _message.getSearchKey() + ", NOT answering query, out of our keyspace");
-                    getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1, 0);
+                    getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1);
                     Set<Hash> routerHashSet = getNearestRouters();
                     sendClosest(_message.getSearchKey(), routerHashSet, fromKey, _message.getReplyTunnel());
                 }
@@ -143,7 +143,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                 _log.info("We have LS " + _message.getSearchKey() +
                           ", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
                           " RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
-            getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1, 0);
+            getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
             Set<Hash> routerHashSet = getNearestRouters();
             sendClosest(_message.getSearchKey(), routerHashSet, fromKey, _message.getReplyTunnel());
         }
@@ -246,11 +246,11 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                       + " tunnel " + replyTunnel);
         DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
         if (data.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
-            getContext().statManager().addRateData("netDb.lookupsMatchedLeaseSet", 1, 0);
+            getContext().statManager().addRateData("netDb.lookupsMatchedLeaseSet", 1);
         }
         msg.setEntry(data);
-        getContext().statManager().addRateData("netDb.lookupsMatched", 1, 0);
-        getContext().statManager().addRateData("netDb.lookupsHandled", 1, 0);
+        getContext().statManager().addRateData("netDb.lookupsMatched", 1);
+        getContext().statManager().addRateData("netDb.lookupsHandled", 1);
         sendMessage(msg, toPeer, replyTunnel);
     }
 
@@ -267,7 +267,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
             if (++i >= MAX_ROUTERS_RETURNED)
                 break;
         }
-        getContext().statManager().addRateData("netDb.lookupsHandled", 1, 0);
+        getContext().statManager().addRateData("netDb.lookupsHandled", 1);
         sendMessage(msg, toPeer, replyTunnel); // should this go via garlic messages instead?
     }
@@ -105,7 +105,7 @@ public class DBHistory {
     public void lookupSuccessful() {
         _successfulLookups++;
         _failedLookupRate.addData(0, 0);
-        _context.statManager().addRateData("peer.failedLookupRate", 0, 0);
+        _context.statManager().addRateData("peer.failedLookupRate", 0);
         _lastLookupSuccessful = _context.clock().now();
     }
 
@@ -115,7 +115,7 @@ public class DBHistory {
     public void lookupFailed() {
         _failedLookups++;
         _failedLookupRate.addData(1, 0);
-        _context.statManager().addRateData("peer.failedLookupRate", 1, 0);
+        _context.statManager().addRateData("peer.failedLookupRate", 1);
         _lastLookupFailed = _context.clock().now();
     }
 
@@ -128,7 +128,7 @@ public class DBHistory {
         // Fixme, redefined this to include both lookup and store fails,
         // need to fix the javadocs
         _failedLookupRate.addData(0, 0);
-        _context.statManager().addRateData("peer.failedLookupRate", 0, 0);
+        _context.statManager().addRateData("peer.failedLookupRate", 0);
         _lastStoreSuccessful = _context.clock().now();
     }
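Also worth noting in the DBHistory hunks: the per-peer _failedLookupRate.addData(0, 0) / addData(1, 0) calls are untouched, since that is a different API (value plus event duration, no stat name) from StatManager.addRateData(). A hypothetical sketch of the dual-recording pattern those methods follow, with the router-wide stat now using the short form:

    // Sketch only (assumed fields _failedLookupRate and _context):
    private void recordLookup(boolean failed) {
        long value = failed ? 1 : 0;
        _failedLookupRate.addData(value, 0);                                 // per-peer rate, unchanged API
        _context.statManager().addRateData("peer.failedLookupRate", value);  // router-wide stat, new form
    }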
@@ -210,9 +210,9 @@ public class PeerTestJob extends JobImpl {
                 if (_log.shouldLog(Log.WARN))
                     _log.warn("Took too long to get a reply from peer " + _peer.toBase64()
                               + ": " + (0-timeLeft) + "ms too slow");
-                getContext().statManager().addRateData("peer.testTooSlow", 0-timeLeft, 0);
+                getContext().statManager().addRateData("peer.testTooSlow", 0-timeLeft);
             } else {
-                getContext().statManager().addRateData("peer.testOK", getTestTimeout() - timeLeft, 0);
+                getContext().statManager().addRateData("peer.testOK", getTestTimeout() - timeLeft);
             }
             _matchFound = true;
             return true;
@@ -297,7 +297,7 @@ public class PeerTestJob extends JobImpl {
 
             // don't fail the tunnels, as the peer might just plain be down, or
             // otherwise overloaded
-            getContext().statManager().addRateData("peer.testTimeout", 1, 0);
+            getContext().statManager().addRateData("peer.testTimeout", 1);
         }
     }
 }
@@ -98,7 +98,7 @@ class BatchedPreprocessor extends TrivialPreprocessor {
         if (rv > defaultAmount)
             rv = defaultAmount;
         if (shouldStat)
-            _context.statManager().addRateData("tunnel.batchDelayAmount", rv, 0);
+            _context.statManager().addRateData("tunnel.batchDelayAmount", rv);
         return rv;
     }
 
@@ -159,7 +159,7 @@ class BatchedPreprocessor extends TrivialPreprocessor {
                 long beforeSend = System.currentTimeMillis();
                 _pendingSince = 0;
                 send(pending, 0, i, sender, rec);
-                _context.statManager().addRateData("tunnel.batchFullFragments", 1, 0);
+                _context.statManager().addRateData("tunnel.batchFullFragments", 1);
                 long afterSend = System.currentTimeMillis();
                 if (_log.shouldLog(Log.INFO))
                     display(allocated, pending, "Sent the message with " + (i+1));
@@ -176,7 +176,7 @@ class BatchedPreprocessor extends TrivialPreprocessor {
                     if (timingBuf != null)
                         timingBuf.append(" sent " + cur);
                     notePreprocessing(cur.getMessageId(), cur.getFragmentNumber(), cur.getData().length, cur.getMessageIds(), "flushed allocated");
-                    _context.statManager().addRateData("tunnel.batchFragmentation", cur.getFragmentNumber() + 1, 0);
+                    _context.statManager().addRateData("tunnel.batchFragmentation", cur.getFragmentNumber() + 1);
                     _context.statManager().addRateData("tunnel.writeDelay", cur.getLifetime(), cur.getData().length);
                 }
                 if (msg.getOffset() >= msg.getData().length) {
@@ -185,11 +185,11 @@ class BatchedPreprocessor extends TrivialPreprocessor {
                     if (timingBuf != null)
                         timingBuf.append(" sent perfect fit " + cur).append(".");
                    notePreprocessing(cur.getMessageId(), cur.getFragmentNumber(), msg.getData().length, msg.getMessageIds(), "flushed tail, remaining: " + pending);
-                    _context.statManager().addRateData("tunnel.batchFragmentation", cur.getFragmentNumber() + 1, 0);
+                    _context.statManager().addRateData("tunnel.batchFragmentation", cur.getFragmentNumber() + 1);
                     _context.statManager().addRateData("tunnel.writeDelay", cur.getLifetime(), cur.getData().length);
                 }
                 if (i > 0)
-                    _context.statManager().addRateData("tunnel.batchMultipleCount", i+1, 0);
+                    _context.statManager().addRateData("tunnel.batchMultipleCount", i+1);
                 allocated = 0;
                 batchCount++;
                 long pendingEnd = System.currentTimeMillis();
@@ -221,11 +221,11 @@ class BatchedPreprocessor extends TrivialPreprocessor {
                 // not even a full message, but we want to flush it anyway
 
                 if (pending.size() > 1)
-                    _context.statManager().addRateData("tunnel.batchMultipleCount", pending.size(), 0);
+                    _context.statManager().addRateData("tunnel.batchMultipleCount", pending.size());
                 _context.statManager().addRateData("tunnel.batchDelaySent", pending.size(), 0);
 
                 send(pending, 0, pending.size()-1, sender, rec);
-                _context.statManager().addRateData("tunnel.batchSmallFragments", FULL_SIZE - allocated, 0);
+                _context.statManager().addRateData("tunnel.batchSmallFragments", FULL_SIZE - allocated);
 
                 // Remove everything in the outgoing message from the pending queue
                 int beforeSize = pending.size();
@@ -235,7 +235,7 @@ class BatchedPreprocessor extends TrivialPreprocessor {
                         break;
                     pending.remove(0);
                     notePreprocessing(cur.getMessageId(), cur.getFragmentNumber(), cur.getData().length, cur.getMessageIds(), "flushed remaining");
-                    _context.statManager().addRateData("tunnel.batchFragmentation", cur.getFragmentNumber() + 1, 0);
+                    _context.statManager().addRateData("tunnel.batchFragmentation", cur.getFragmentNumber() + 1);
                     _context.statManager().addRateData("tunnel.writeDelay", cur.getLifetime(), cur.getData().length);
                 }
 
@@ -259,7 +259,7 @@ class BatchedPreprocessor extends TrivialPreprocessor {
                     _pendingSince = 0;
                 }
                 if (batchCount > 1)
-                    _context.statManager().addRateData("tunnel.batchCount", batchCount, 0);
+                    _context.statManager().addRateData("tunnel.batchCount", batchCount);
                 if (_log.shouldLog(Log.INFO))
                     display(allocated, pending, "flushed " + (beforeSize) + ", no remaining after " + delayAmount + "ms");
 
@@ -273,11 +273,11 @@ class BatchedPreprocessor extends TrivialPreprocessor {
                 // won't get here, we returned
             } else {
                 // We didn't flush. Note that the messages remain on the pending list.
-                _context.statManager().addRateData("tunnel.batchDelay", pending.size(), 0);
+                _context.statManager().addRateData("tunnel.batchDelay", pending.size());
                 if (_pendingSince <= 0)
                     _pendingSince = _context.clock().now();
                 if (batchCount > 1)
-                    _context.statManager().addRateData("tunnel.batchCount", batchCount, 0);
+                    _context.statManager().addRateData("tunnel.batchCount", batchCount);
                 // not yet time to send the delayed flush
                 if (_log.shouldLog(Log.INFO))
                     display(allocated, pending, "dont flush");
@@ -251,9 +251,9 @@ class FragmentHandler {
         if (eq) {
             int excessPadding = paddingEnd - (HopProcessor.IV_LENGTH + 4 + 1);
             if (excessPadding > 0) // suboptimal fragmentation
-                _context.statManager().addRateData("tunnel.smallFragments", excessPadding, 0);
+                _context.statManager().addRateData("tunnel.smallFragments", excessPadding);
             else
-                _context.statManager().addRateData("tunnel.fullFragments", 1, 0);
+                _context.statManager().addRateData("tunnel.fullFragments", 1);
         }
 
         // ByteCache/ByteArray corruption detection
@@ -38,7 +38,7 @@ class InboundGatewayReceiver implements TunnelGateway.Receiver {
                 // It should be rare to forget the router info for the next peer
                 ReceiveJob j = null;
                 if (alreadySearched)
-                    _context.statManager().addRateData("tunnel.inboundLookupSuccess", 0, 0);
+                    _context.statManager().addRateData("tunnel.inboundLookupSuccess", 0);
                 else
                     j = new ReceiveJob(_context, encrypted);
                 _context.netDb().lookupRouterInfo(_config.getSendTo(), j, j, MAX_LOOKUP_TIME);
@@ -46,7 +46,7 @@ class InboundGatewayReceiver implements TunnelGateway.Receiver {
             }
         }
         if (alreadySearched)
-            _context.statManager().addRateData("tunnel.inboundLookupSuccess", 1, 0);
+            _context.statManager().addRateData("tunnel.inboundLookupSuccess", 1);
 
         // We do this before the preprocessor now (i.e. before fragmentation)
         //if (_context.tunnelDispatcher().shouldDropParticipatingMessage("IBGW", encrypted.length))
@@ -267,7 +267,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
             } else if (type == DataMessage.MESSAGE_TYPE) {
                 // a data message targetting the local router is how we send load tests (real
                 // data messages target destinations)
-                _context.statManager().addRateData("tunnel.handleLoadClove", 1, 0);
+                _context.statManager().addRateData("tunnel.handleLoadClove", 1);
                 data = null;
                 //_context.inNetMessagePool().add(data, null, null);
             } else if (_client != null && type != DeliveryStatusMessage.MESSAGE_TYPE) {
@@ -99,7 +99,7 @@ class OutboundReceiver implements TunnelGateway.Receiver {
             } else {
                 stat = 0;
             }
-            _context.statManager().addRateData("tunnel.outboundLookupSuccess", stat, 0);
+            _context.statManager().addRateData("tunnel.outboundLookupSuccess", stat);
         }
     }
 
@@ -114,7 +114,7 @@ class OutboundReceiver implements TunnelGateway.Receiver {
             if (_log.shouldLog(Log.WARN))
                 _log.warn("lookup of " + _config.getPeer(1)
                           + " failed for " + _config);
-            _context.statManager().addRateData("tunnel.outboundLookupSuccess", 0, 0);
+            _context.statManager().addRateData("tunnel.outboundLookupSuccess", 0);
         }
     }
 }
@@ -78,7 +78,7 @@ class TunnelParticipant {
             if (_nextHopCache == null) {
                 _nextHopCache = _context.netDb().lookupRouterInfoLocally(_config.getSendTo());
                 // nothing for failure since fail job is null
-                _context.statManager().addRateData("tunnel.participantLookupSuccess", 1, 0);
+                _context.statManager().addRateData("tunnel.participantLookupSuccess", 1);
             }
         }
     }
@@ -225,7 +225,7 @@ class TunnelParticipant {
                           + " failed! where do we go for " + _config + "? msg dropped: " + _msg);
                 stat = 0;
             }
-            _context.statManager().addRateData("tunnel.participantLookupSuccess", stat, 0);
+            _context.statManager().addRateData("tunnel.participantLookupSuccess", stat);
         }
     }
 }
@@ -255,7 +255,7 @@ class TunnelParticipant {
                 _log.warn("Lookup the nextHop (" + _config.getSendTo()
                           + " failed! where do we go for " + _config + "? msg dropped: " + _msg);
             }
-            _context.statManager().addRateData("tunnel.participantLookupSuccess", 0, 0);
+            _context.statManager().addRateData("tunnel.participantLookupSuccess", 0);
         }
     }