forked from I2P_Developers/i2p.i2p
BuildHandler: Disable removal of tunnel on next-hop timeout,
as it isn't reliable; cleanup and log tweaks
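
In short: the TunnelBuildNextHopFailJob that fires when the build message
cannot be delivered to the next hop no longer tears the tunnel down; it only
records the timeout stat. A condensed view of the new runJob() (the full hunk
is at the end of the BuildHandler diff below):

    public void runJob() {
        // A send timeout isn't a reliable indication of actual failure;
        // we sometimes get subsequent tunnel messages. Until that is
        // investigated and fixed, don't remove the tunnel.
        //getContext().tunnelDispatcher().remove(_cfg);
        getContext().statManager().addRateData("tunnel.rejectTimeout2", 1);
    }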
history.txt
@@ -1,3 +1,13 @@
+2016-01-17 zzz
+ * BuildHandler: Disable tunnel removal on next-hop timeout
+ * Console:
+   - Fix mime type for svg in themes directory
+   - Add zh_TW translation
+ * Fortuna: Add getByte() method
+ * i2psnark: add opendocument mime types
+ * i2ptunnel: Remove unused stats
+ * Utils: Move CachedIteratorArrayList from core to router
+
 2016-01-13 zzz
  * BuildHandler: More early-disconnect cases
  * Family: Add i2p-dev cert
router/java/src/net/i2p/router/RouterVersion.java
@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 24;
+    public final static long BUILD = 25;
 
     /** for example "-test" */
     public final static String EXTRA = "-rc";
router/java/src/net/i2p/router/tunnel/TunnelDispatcher.java
@@ -451,8 +451,8 @@ public class TunnelDispatcher implements Service {
     public void remove(TunnelCreatorConfig cfg) {
         if (cfg.isInbound()) {
             TunnelId recvId = cfg.getConfig(cfg.getLength()-1).getReceiveTunnel();
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("removing our own inbound " + cfg);
+            if (_log.shouldLog(Log.INFO))
+                _log.info("removing our own inbound " + cfg);
             TunnelParticipant participant = _participants.remove(recvId);
             if (participant == null) {
                 _inboundGateways.remove(recvId);
@@ -470,8 +470,8 @@ public class TunnelDispatcher implements Service {
                 }
             }
         } else {
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("removing our own outbound " + cfg);
+            if (_log.shouldLog(Log.INFO))
+                _log.info("removing our own outbound " + cfg);
             TunnelId outId = cfg.getConfig(0).getSendTunnel();
             TunnelGateway gw = _outboundGateways.remove(outId);
             if (gw != null) {
@@ -498,8 +498,8 @@ public class TunnelDispatcher implements Service {
 
         boolean removed = (null != _participatingConfig.remove(recvId));
         if (removed) {
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("removing " + cfg /* , new Exception() */ );
+            if (_log.shouldLog(Log.INFO))
+                _log.info("removing " + cfg /* , new Exception() */ );
         } else {
             // this is normal, this can get called twice
             if (_log.shouldLog(Log.DEBUG))
router/java/src/net/i2p/router/tunnel/pool/BuildHandler.java
@@ -448,8 +448,8 @@ class BuildHandler implements Runnable {
      */
     private long handleRequest(BuildMessageState state) {
         long timeSinceReceived = _context.clock().now()-state.recvTime;
-        if (_log.shouldLog(Log.DEBUG))
-            _log.debug(state.msg.getUniqueId() + ": handling request after " + timeSinceReceived);
+        //if (_log.shouldLog(Log.DEBUG))
+        //    _log.debug(state.msg.getUniqueId() + ": handling request after " + timeSinceReceived);
 
         Hash from = state.fromHash;
         if (from == null && state.from != null)
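
The per-request debug line above is commented out of this hot path entirely.
Where logging stays, the commit keeps the usual idiom of gating each call
behind shouldLog() so the argument string is never concatenated when that
level is disabled; a minimal sketch of the idiom:

    // Gated: the concatenation only runs when DEBUG logging is enabled.
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Request " + req + " handled");

    // Ungated: builds the string on every call, even with DEBUG off.
    _log.debug("Request " + req + " handled");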
@@ -492,7 +492,7 @@ class BuildHandler implements Runnable {
         RouterInfo nextPeerInfo = _context.netDb().lookupRouterInfoLocally(nextPeer);
         long lookupTime = System.currentTimeMillis()-beforeLookup;
         if (lookupTime > 500 && _log.shouldLog(Log.WARN))
-            _log.warn("Took too long to lookup the request: " + lookupTime + "/" + readPeerTime + " for message " + state.msg.getUniqueId() + " received " + (timeSinceReceived+decryptTime) + " ago");
+            _log.warn("Took too long to lookup the request: " + lookupTime + "/" + readPeerTime + " for " + req);
         if (nextPeerInfo == null) {
             // limit concurrent next-hop lookups to prevent job queue overload attacks
             int numTunnels = _context.tunnelManager().getParticipatingCount();
@@ -500,7 +500,7 @@ class BuildHandler implements Runnable {
             int current = _currentLookups.incrementAndGet();
             if (current <= limit) {
                 if (_log.shouldLog(Log.DEBUG))
-                    _log.debug("Request " + state.msg.getUniqueId() + '/' + req.readReceiveTunnelId() + '/' + req.readNextTunnelId()
+                    _log.debug("Request " + req
                                + " handled, lookup next peer " + nextPeer
                                + " lookups: " + current + '/' + limit);
                 _context.netDb().lookupRouterInfo(nextPeer, new HandleReq(_context, state, req, nextPeer),
@@ -508,7 +508,7 @@ class BuildHandler implements Runnable {
             } else {
                 _currentLookups.decrementAndGet();
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn("Drop next hop lookup, limit " + limit);
+                    _log.warn("Drop next hop lookup, limit " + limit + ": " + req);
                 _context.statManager().addRateData("tunnel.dropLookupThrottle", 1);
             }
             if (from != null)
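
The throttle above bounds concurrent next-hop lookups with a bare
AtomicInteger: reserve a slot with incrementAndGet(), proceed only if still
within the limit, otherwise undo the reservation; the deferred HandleReq and
TimeoutReq jobs decrement when they finish. A standalone sketch of the
pattern (class and method names here are illustrative, not from the router):

    import java.util.concurrent.atomic.AtomicInteger;

    class LookupThrottle {
        private final AtomicInteger current = new AtomicInteger();

        /** @return true if a lookup may start; caller must later call finish() */
        boolean tryStart(int limit) {
            if (current.incrementAndGet() <= limit)
                return true;
            current.decrementAndGet();   // over the limit: undo our reservation
            return false;
        }

        /** invoked from the lookup's success or timeout callback */
        void finish() {
            current.decrementAndGet();
        }
    }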
@@ -519,7 +519,7 @@ class BuildHandler implements Runnable {
             handleReq(nextPeerInfo, state, req, nextPeer);
             long handleTime = System.currentTimeMillis() - beforeHandle;
             if (_log.shouldLog(Log.DEBUG))
-                _log.debug("Request " + state.msg.getUniqueId() + " handled and we know the next peer "
+                _log.debug("Request " + req + " handled and we know the next peer "
                           + nextPeer + " after " + handleTime
                           + "/" + decryptTime + "/" + lookupTime + "/" + timeSinceReceived);
             return handleTime;
@@ -560,7 +560,7 @@ class BuildHandler implements Runnable {
             // decrement in-progress counter
             _currentLookups.decrementAndGet();
             if (_log.shouldLog(Log.DEBUG))
-                _log.debug("Request " + _state.msg.getUniqueId() + " handled with a successful deferred lookup for the next peer " + _nextPeer);
+                _log.debug("Request " + _state.msg.getUniqueId() + " handled with a successful deferred lookup: " + _req);
 
             RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(_nextPeer);
             if (ri != null) {
@@ -568,7 +568,7 @@ class BuildHandler implements Runnable {
                 getContext().statManager().addRateData("tunnel.buildLookupSuccess", 1);
             } else {
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn("Deferred successfully, but we couldnt find " + _nextPeer);
+                    _log.warn("Deferred successfully, but we couldnt find " + _nextPeer + "? " + _req);
                 getContext().statManager().addRateData("tunnel.buildLookupSuccess", 0);
             }
         }
@@ -593,15 +593,15 @@ class BuildHandler implements Runnable {
             _currentLookups.decrementAndGet();
             getContext().statManager().addRateData("tunnel.rejectTimeout", 1);
             getContext().statManager().addRateData("tunnel.buildLookupSuccess", 0);
-            // logging commented out so class can be static
-            //if (_log.shouldLog(Log.WARN))
-            //    _log.warn("Request " + _state.msg.getUniqueId()
-            //              + " could no be satisfied, as the next peer could not be found: " + _nextPeer.toBase64());
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Next hop lookup failure: " + _req);
 
             // ??? should we blame the peer here? getContext().profileManager().tunnelTimedOut(_nextPeer);
             getContext().messageHistory().tunnelRejected(_state.fromHash, new TunnelId(_req.readReceiveTunnelId()), _nextPeer,
-                                                         "rejected because we couldn't find " + _nextPeer + ": " +
-                                                         _state.msg.getUniqueId() + "/" + _req.readNextTunnelId());
+                                                         // this is all disabled anyway
+                                                         //"rejected because we couldn't find " + _nextPeer + ": " +
+                                                         //_state.msg.getUniqueId() + "/" + _req.readNextTunnelId());
+                                                         "lookup fail");
         }
     }
 
@@ -654,7 +654,7 @@ class BuildHandler implements Runnable {
 
         if (isInGW && isOutEnd) {
            _context.statManager().addRateData("tunnel.rejectHostile", 1);
-           _log.error("Dropping build request, IBGW+OBEP");
+           _log.error("Dropping build request, IBGW+OBEP: " + req);
            if (from != null)
                _context.commSystem().mayDisconnect(from);
            return;
@@ -667,7 +667,7 @@ class BuildHandler implements Runnable {
                 // No way to recognize if we are every other hop, but see below
                 // old i2pd
                 if (_log.shouldWarn())
-                    _log.warn("Dropping build request, we are the next hop");
+                    _log.warn("Dropping build request, we are the next hop: " + req);
                 if (from != null)
                     _context.commSystem().mayDisconnect(from);
                 return;
@@ -678,7 +678,7 @@ class BuildHandler implements Runnable {
             // but if not, something is seriously wrong here.
             if (from == null || _context.routerHash().equals(from)) {
                 _context.statManager().addRateData("tunnel.rejectHostile", 1);
-                _log.error("Dropping build request, we are the previous hop");
+                _log.error("Dropping build request, we are the previous hop: " + req);
                 return;
             }
         }
@@ -689,7 +689,7 @@ class BuildHandler implements Runnable {
             // i2pd does this
             _context.statManager().addRateData("tunnel.rejectHostile", 1);
             if (_log.shouldLog(Log.WARN))
-                _log.warn("Dropping build request with the same previous and next hop");
+                _log.warn("Dropping build request with the same previous and next hop: " + req);
             _context.commSystem().mayDisconnect(from);
             return;
         }
@@ -704,7 +704,7 @@ class BuildHandler implements Runnable {
         if (timeDiff > MAX_REQUEST_AGE) {
             _context.statManager().addRateData("tunnel.rejectTooOld", 1);
             if (_log.shouldLog(Log.WARN))
-                _log.warn("Dropping build request too old... replay attack? " + DataHelper.formatDuration(timeDiff));
+                _log.warn("Dropping build request too old... replay attack? " + DataHelper.formatDuration(timeDiff) + ": " + req);
             if (from != null)
                 _context.commSystem().mayDisconnect(from);
             return;
@@ -712,7 +712,7 @@ class BuildHandler implements Runnable {
         if (timeDiff < 0 - MAX_REQUEST_FUTURE) {
             _context.statManager().addRateData("tunnel.rejectFuture", 1);
             if (_log.shouldLog(Log.WARN))
-                _log.warn("Dropping build request too far in future " + DataHelper.formatDuration(0 - timeDiff));
+                _log.warn("Dropping build request too far in future " + DataHelper.formatDuration(0 - timeDiff) + ": " + req);
             if (from != null)
                 _context.commSystem().mayDisconnect(from);
             return;
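
The two checks above implement a simple acceptance window around the local
clock: a request stamped more than MAX_REQUEST_AGE in the past is treated as
a possible replay, and one stamped more than MAX_REQUEST_FUTURE ahead as
clock skew; either way it is dropped. A self-contained sketch of the window
logic, with illustrative constant values (BuildHandler defines its own):

    class RequestWindow {
        // Illustrative values; the router's real limits live in BuildHandler.
        static final long MAX_REQUEST_AGE = 8 * 60 * 1000L;
        static final long MAX_REQUEST_FUTURE = 3 * 60 * 1000L;

        /** @return true if requestTime is acceptably close to now */
        static boolean inWindow(long now, long requestTime) {
            long timeDiff = now - requestTime;
            if (timeDiff > MAX_REQUEST_AGE)
                return false;              // too old: possible replay
            if (timeDiff < 0 - MAX_REQUEST_FUTURE)
                return false;              // too far in the future: clock skew
            return true;
        }
    }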
@@ -791,7 +791,7 @@ class BuildHandler implements Runnable {
         if (response == 0 && !isInGW) {
             if (from != null && _throttler.shouldThrottle(from)) {
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn("Rejecting tunnel (hop throttle), previous hop: " + from);
+                    _log.warn("Rejecting tunnel (hop throttle), previous hop: " + from + ": " + req);
                 // no setTunnelStatus() indication
                 _context.statManager().addRateData("tunnel.rejectHopThrottle", 1);
                 response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
@@ -800,17 +800,12 @@ class BuildHandler implements Runnable {
         if (response == 0 && (!isOutEnd) &&
             _throttler.shouldThrottle(nextPeer)) {
             if (_log.shouldLog(Log.WARN))
-                _log.warn("Rejecting tunnel (hop throttle), next hop: " + nextPeer);
+                _log.warn("Rejecting tunnel (hop throttle), next hop: " + req);
             _context.statManager().addRateData("tunnel.rejectHopThrottle", 1);
             // no setTunnelStatus() indication
             response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
         }
 
-        if (_log.shouldLog(Log.DEBUG))
-            _log.debug("Responding to " + state.msg.getUniqueId() + "/" + ourId
-                       + " after " + recvDelay + " with " + response
-                       + " from " + (from != null ? from : "tunnel"));
-
         HopConfig cfg = null;
         if (response == 0) {
             cfg = new HopConfig();
@@ -849,7 +844,7 @@ class BuildHandler implements Runnable {
             success = _context.tunnelDispatcher().joinParticipant(cfg);
             if (success) {
                 if (_log.shouldLog(Log.DEBUG))
-                    _log.debug("Joining " + state.msg.getUniqueId() + "/" + cfg.getReceiveTunnel() + "/" + recvDelay + " as " + (isOutEnd ? "outbound endpoint" : isInGW ? "inbound gw" : "participant"));
+                    _log.debug("Joining: " + req);
             } else {
                 // Dup Tunnel ID. This can definitely happen (birthday paradox).
                 // Probability in 11 minutes (per hop type):
@@ -857,7 +852,7 @@ class BuildHandler implements Runnable {
                 response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
                 _context.statManager().addRateData("tunnel.rejectDupID", 1);
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn("DUP ID failure " + state.msg.getUniqueId() + "/" + cfg.getReceiveTunnel() + " as " + (isOutEnd ? "outbound endpoint" : isInGW ? "inbound gw" : "participant"));
+                    _log.warn("DUP ID failure: " + req);
             }
         }
 
@@ -866,10 +861,12 @@ class BuildHandler implements Runnable {
         if (response != 0) {
             _context.statManager().addRateData("tunnel.reject." + response, 1);
             _context.messageHistory().tunnelRejected(from, new TunnelId(ourId), nextPeer,
-                                                     "rejecting for " + response + ": " +
-                                                     state.msg.getUniqueId() + "/" + ourId + "/" + req.readNextTunnelId() + " delay " +
-                                                     recvDelay + " as " +
-                                                     (isOutEnd ? "outbound endpoint" : isInGW ? "inbound gw" : "participant"));
+                                                     // this is all disabled anyway
+                                                     //"rejecting for " + response + ": " +
+                                                     //state.msg.getUniqueId() + "/" + ourId + "/" + req.readNextTunnelId() + " delay " +
+                                                     //recvDelay + " as " +
+                                                     //(isOutEnd ? "outbound endpoint" : isInGW ? "inbound gw" : "participant"));
+                                                     Integer.toString(response));
             if (from != null)
                 _context.commSystem().mayDisconnect(from);
             // Connection congestion control:
@@ -881,7 +878,7 @@ class BuildHandler implements Runnable {
                 (! _context.commSystem().isEstablished(nextPeer))) {
                 _context.statManager().addRateData("tunnel.dropConnLimits", 1);
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn("Not sending rejection due to conn limits");
+                    _log.warn("Not sending rejection due to conn limits: " + req);
                 return;
             }
         } else if (isInGW && from != null) {
@@ -889,6 +886,11 @@ class BuildHandler implements Runnable {
             _context.commSystem().mayDisconnect(from);
         }
 
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("Responding to " + state.msg.getUniqueId()
+                       + " after " + recvDelay + " with " + response
+                       + " from " + (from != null ? from : "tunnel") + ": " + req);
+
         EncryptedBuildRecord reply = BuildResponseRecord.create(_context, response, req.readReplyKey(), req.readReplyIV(), state.msg.getUniqueId());
         int records = state.msg.getRecordCount();
         int ourSlot = -1;
@@ -904,19 +906,16 @@ class BuildHandler implements Runnable {
         }
 
         if (_log.shouldLog(Log.DEBUG))
-            _log.debug("Read slot " + ourSlot + " containing our hop @ " + _context.routerHash()
-                       + " accepted? " + response + " receiving on " + ourId
-                       + " sending to " + nextId
-                       + " on " + nextPeer
-                       + " inGW? " + isInGW + " outEnd? " + isOutEnd
-                       + " recvDelay " + recvDelay + " replyMessage " + req.readReplyMessageId()
-                       + " replyKey " + req.readReplyKey() + " replyIV " + Base64.encode(req.readReplyIV()));
+            _log.debug("Read slot " + ourSlot + " containing: " + req
+                       + " accepted? " + response
+                       + " recvDelay " + recvDelay + " replyMessage " + req.readReplyMessageId());
 
         // now actually send the response
+        long expires = _context.clock().now() + NEXT_HOP_SEND_TIMEOUT;
         if (!isOutEnd) {
             state.msg.setUniqueId(req.readReplyMessageId());
-            state.msg.setMessageExpiration(_context.clock().now() + NEXT_HOP_SEND_TIMEOUT);
-            OutNetMessage msg = new OutNetMessage(_context, state.msg, state.msg.getMessageExpiration(), PRIORITY, nextPeerInfo);
+            state.msg.setMessageExpiration(expires);
+            OutNetMessage msg = new OutNetMessage(_context, state.msg, expires, PRIORITY, nextPeerInfo);
             if (response == 0)
                 msg.setOnFailedSendJob(new TunnelBuildNextHopFailJob(_context, cfg));
             _context.outNetMessagePool().add(msg);
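
One small refactor threads through this hunk and the next: the send deadline
is computed once and reused for the forwarded build message, the reply
TunnelGatewayMessage, and both OutNetMessage wrappers. Previously each site
read the clock on its own, so the expirations of messages belonging to one
request could drift apart slightly. The shared-deadline shape, taken from the
lines above and below:

    // One clock read; every message built for this request shares the deadline.
    long expires = _context.clock().now() + NEXT_HOP_SEND_TIMEOUT;
    state.msg.setMessageExpiration(expires);
    replyMsg.setMessageExpiration(expires);
    m.setMessageExpiration(expires);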
@@ -932,20 +931,20 @@ class BuildHandler implements Runnable {
             for (int i = 0; i < records; i++)
                 replyMsg.setRecord(i, state.msg.getRecord(i));
             replyMsg.setUniqueId(req.readReplyMessageId());
-            replyMsg.setMessageExpiration(_context.clock().now() + NEXT_HOP_SEND_TIMEOUT);
+            replyMsg.setMessageExpiration(expires);
             TunnelGatewayMessage m = new TunnelGatewayMessage(_context);
             m.setMessage(replyMsg);
-            m.setMessageExpiration(replyMsg.getMessageExpiration());
+            m.setMessageExpiration(expires);
             m.setTunnelId(new TunnelId(nextId));
             if (_context.routerHash().equals(nextPeer)) {
                 // ok, we are the gateway, so inject it
                 if (_log.shouldLog(Log.DEBUG))
                     _log.debug("We are the reply gateway for " + nextId
-                               + " when replying to replyMessage " + req.readReplyMessageId());
+                               + " when replying to replyMessage " + req);
                 _context.tunnelDispatcher().dispatch(m);
             } else {
                 // ok, the gateway is some other peer, shove 'er across
-                OutNetMessage outMsg = new OutNetMessage(_context, m, m.getMessageExpiration(), PRIORITY, nextPeerInfo);
+                OutNetMessage outMsg = new OutNetMessage(_context, m, expires, PRIORITY, nextPeerInfo);
                 if (response == 0)
                     outMsg.setOnFailedSendJob(new TunnelBuildNextHopFailJob(_context, cfg));
                 _context.outNetMessagePool().add(outMsg);
@@ -972,10 +971,10 @@ class BuildHandler implements Runnable {
         // endpoint, receiving the request at the last hop)
         long reqId = receivedMessage.getUniqueId();
         PooledTunnelCreatorConfig cfg = _exec.removeFromBuilding(reqId);
-        if (_log.shouldLog(Log.DEBUG))
-            _log.debug("Receive tunnel build message " + reqId + " from "
-                       + (from != null ? from.calculateHash() : fromHash != null ? fromHash : "tunnels")
-                       + ", found matching tunnel? " + (cfg != null));
+        //if (_log.shouldLog(Log.DEBUG))
+        //    _log.debug("Receive tunnel build message " + reqId + " from "
+        //               + (from != null ? from.calculateHash() : fromHash != null ? fromHash : "tunnels")
+        //               + ", found matching tunnel? " + (cfg != null));
         if (cfg != null) {
             if (!cfg.isInbound()) {
                 // shouldnt happen - should we put it back?
@@ -1171,7 +1170,11 @@ class BuildHandler implements Runnable {
         public String getName() { return "Timeout contacting next peer for tunnel join"; }
 
         public void runJob() {
-            getContext().tunnelDispatcher().remove(_cfg);
+            // TODO
+            // This doesn't seem to be a reliable indication of actual failure,
+            // as we sometimes get subsequent tunnel messages.
+            // Until this is investigated and fixed, don't remove the tunnel.
+            //getContext().tunnelDispatcher().remove(_cfg);
             getContext().statManager().addRateData("tunnel.rejectTimeout2", 1);
             Log log = getContext().logManager().getLog(BuildHandler.class);
             if (log.shouldLog(Log.WARN))
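
For reference, TunnelBuildNextHopFailJob is the callback attached with
msg.setOnFailedSendJob(...) earlier in the diff, run only if the transport
gives up delivering the build message to the next hop. A hedged skeleton of
its shape, assuming the usual JobImpl base class; only the members visible in
this diff are shown:

    private static class TunnelBuildNextHopFailJob extends JobImpl {
        private final HopConfig _cfg;

        TunnelBuildNextHopFailJob(RouterContext ctx, HopConfig cfg) {
            super(ctx);
            _cfg = cfg;
        }

        public String getName() { return "Timeout contacting next peer for tunnel join"; }

        public void runJob() {
            // As of this commit: record the timeout but leave the tunnel in
            // place, since a failed send isn't a reliable failure signal.
            getContext().statManager().addRateData("tunnel.rejectTimeout2", 1);
        }
    }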