BuildHandler: Disable removal of tunnel on next-hop timeout,
as it isn't a reliable failure indication; cleanup and log tweaks
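
Context for reviewers: the core change is in TunnelBuildNextHopFailJob at the
bottom of this diff. When we join a tunnel and forward the build record onward,
a failed-send job is attached to the outbound message, and until now that job
tore the just-joined tunnel back down. A minimal sketch of the pattern, using
only calls visible in this diff (surrounding details simplified):

    // Forward the request to the next hop; if the transport reports a
    // failed/timed-out send, the attached job runs.
    OutNetMessage msg = new OutNetMessage(_context, state.msg, expires, PRIORITY, nextPeerInfo);
    if (response == 0)
        msg.setOnFailedSendJob(new TunnelBuildNextHopFailJob(_context, cfg));
    _context.outNetMessagePool().add(msg);

    // In TunnelBuildNextHopFailJob.runJob(), after this commit, the tunnel
    // is no longer removed -- the timeout sometimes fires even though
    // tunnel messages arrive later -- so we only record the stat:
    //getContext().tunnelDispatcher().remove(_cfg);
    getContext().statManager().addRateData("tunnel.rejectTimeout2", 1);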
history.txt
@@ -1,3 +1,13 @@
+2016-01-17 zzz
+ * BuildHandler: Disable tunnel removal on next-hop timeout
+ * Console:
+   - Fix mime type for svg in themes directory
+   - Add zh_TW translation
+ * Fortuna: Add getByte() method
+ * i2psnark: add opendocument mime types
+ * i2ptunnel: Remove unused stats
+ * Utils: Move CachedIteratorArrayList from core to router
+
 2016-01-13 zzz
  * BuildHandler: More early-disconnect cases
  * Family: Add i2p-dev cert

RouterVersion.java
@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 24;
+    public final static long BUILD = 25;

     /** for example "-test" */
     public final static String EXTRA = "-rc";

TunnelDispatcher.java
@@ -451,8 +451,8 @@ public class TunnelDispatcher implements Service {
     public void remove(TunnelCreatorConfig cfg) {
         if (cfg.isInbound()) {
             TunnelId recvId = cfg.getConfig(cfg.getLength()-1).getReceiveTunnel();
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("removing our own inbound " + cfg);
+            if (_log.shouldLog(Log.INFO))
+                _log.info("removing our own inbound " + cfg);
             TunnelParticipant participant = _participants.remove(recvId);
             if (participant == null) {
                 _inboundGateways.remove(recvId);
@@ -470,8 +470,8 @@ public class TunnelDispatcher implements Service {
                 }
             }
         } else {
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("removing our own outbound " + cfg);
+            if (_log.shouldLog(Log.INFO))
+                _log.info("removing our own outbound " + cfg);
             TunnelId outId = cfg.getConfig(0).getSendTunnel();
             TunnelGateway gw = _outboundGateways.remove(outId);
             if (gw != null) {
@@ -498,8 +498,8 @@ public class TunnelDispatcher implements Service {

         boolean removed = (null != _participatingConfig.remove(recvId));
         if (removed) {
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("removing " + cfg /* , new Exception() */ );
+            if (_log.shouldLog(Log.INFO))
+                _log.info("removing " + cfg /* , new Exception() */ );
         } else {
             // this is normal, this can get called twice
             if (_log.shouldLog(Log.DEBUG))

BuildHandler.java
@@ -448,8 +448,8 @@ class BuildHandler implements Runnable {
      */
     private long handleRequest(BuildMessageState state) {
         long timeSinceReceived = _context.clock().now()-state.recvTime;
-        if (_log.shouldLog(Log.DEBUG))
-            _log.debug(state.msg.getUniqueId() + ": handling request after " + timeSinceReceived);
+        //if (_log.shouldLog(Log.DEBUG))
+        //    _log.debug(state.msg.getUniqueId() + ": handling request after " + timeSinceReceived);

         Hash from = state.fromHash;
         if (from == null && state.from != null)
@@ -492,7 +492,7 @@ class BuildHandler implements Runnable {
         RouterInfo nextPeerInfo = _context.netDb().lookupRouterInfoLocally(nextPeer);
         long lookupTime = System.currentTimeMillis()-beforeLookup;
         if (lookupTime > 500 && _log.shouldLog(Log.WARN))
-            _log.warn("Took too long to lookup the request: " + lookupTime + "/" + readPeerTime + " for message " + state.msg.getUniqueId() + " received " + (timeSinceReceived+decryptTime) + " ago");
+            _log.warn("Took too long to lookup the request: " + lookupTime + "/" + readPeerTime + " for " + req);
         if (nextPeerInfo == null) {
             // limit concurrent next-hop lookups to prevent job queue overload attacks
             int numTunnels = _context.tunnelManager().getParticipatingCount();
@@ -500,7 +500,7 @@ class BuildHandler implements Runnable {
             int current = _currentLookups.incrementAndGet();
             if (current <= limit) {
                 if (_log.shouldLog(Log.DEBUG))
-                    _log.debug("Request " + state.msg.getUniqueId() + '/' + req.readReceiveTunnelId() + '/' + req.readNextTunnelId()
+                    _log.debug("Request " + req
                                + " handled, lookup next peer " + nextPeer
                                + " lookups: " + current + '/' + limit);
                 _context.netDb().lookupRouterInfo(nextPeer, new HandleReq(_context, state, req, nextPeer),
@@ -508,7 +508,7 @@ class BuildHandler implements Runnable {
             } else {
                 _currentLookups.decrementAndGet();
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn("Drop next hop lookup, limit " + limit);
+                    _log.warn("Drop next hop lookup, limit " + limit + ": " + req);
                 _context.statManager().addRateData("tunnel.dropLookupThrottle", 1);
             }
             if (from != null)
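
The lookup throttle in the hunks above is a plain bounded-concurrency guard: an
AtomicInteger counts in-flight next-hop lookups, a request is dropped when the
count exceeds the limit, and the counter is decremented when the deferred
lookup completes or times out. A self-contained sketch of the idea (class and
method names here are hypothetical, not the router's):

    import java.util.concurrent.atomic.AtomicInteger;

    class LookupThrottle {
        private final AtomicInteger _current = new AtomicInteger();
        private final int _limit;

        LookupThrottle(int limit) { _limit = limit; }

        /** @return true if the caller may start a lookup; call release() when done */
        boolean acquire() {
            if (_current.incrementAndGet() <= _limit)
                return true;
            _current.decrementAndGet();  // over the limit: undo and drop the request
            return false;
        }

        void release() { _current.decrementAndGet(); }
    }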
@@ -519,7 +519,7 @@ class BuildHandler implements Runnable {
             handleReq(nextPeerInfo, state, req, nextPeer);
             long handleTime = System.currentTimeMillis() - beforeHandle;
             if (_log.shouldLog(Log.DEBUG))
-                _log.debug("Request " + state.msg.getUniqueId() + " handled and we know the next peer "
+                _log.debug("Request " + req + " handled and we know the next peer "
                            + nextPeer + " after " + handleTime
                            + "/" + decryptTime + "/" + lookupTime + "/" + timeSinceReceived);
             return handleTime;
@@ -560,7 +560,7 @@ class BuildHandler implements Runnable {
             // decrement in-progress counter
             _currentLookups.decrementAndGet();
             if (_log.shouldLog(Log.DEBUG))
-                _log.debug("Request " + _state.msg.getUniqueId() + " handled with a successful deferred lookup for the next peer " + _nextPeer);
+                _log.debug("Request " + _state.msg.getUniqueId() + " handled with a successful deferred lookup: " + _req);

             RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(_nextPeer);
             if (ri != null) {
@@ -568,7 +568,7 @@ class BuildHandler implements Runnable {
                 getContext().statManager().addRateData("tunnel.buildLookupSuccess", 1);
             } else {
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn("Deferred successfully, but we couldnt find " + _nextPeer);
+                    _log.warn("Deferred successfully, but we couldnt find " + _nextPeer + "? " + _req);
                 getContext().statManager().addRateData("tunnel.buildLookupSuccess", 0);
             }
         }
@@ -593,15 +593,15 @@ class BuildHandler implements Runnable {
             _currentLookups.decrementAndGet();
             getContext().statManager().addRateData("tunnel.rejectTimeout", 1);
             getContext().statManager().addRateData("tunnel.buildLookupSuccess", 0);
-            // logging commented out so class can be static
-            //if (_log.shouldLog(Log.WARN))
-            //    _log.warn("Request " + _state.msg.getUniqueId()
-            //               + " could no be satisfied, as the next peer could not be found: " + _nextPeer.toBase64());
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Next hop lookup failure: " + _req);

             // ??? should we blame the peer here? getContext().profileManager().tunnelTimedOut(_nextPeer);
             getContext().messageHistory().tunnelRejected(_state.fromHash, new TunnelId(_req.readReceiveTunnelId()), _nextPeer,
-                                                         "rejected because we couldn't find " + _nextPeer + ": " +
-                                                         _state.msg.getUniqueId() + "/" + _req.readNextTunnelId());
+                                                         // this is all disabled anyway
+                                                         //"rejected because we couldn't find " + _nextPeer + ": " +
+                                                         //_state.msg.getUniqueId() + "/" + _req.readNextTunnelId());
+                                                         "lookup fail");
         }
     }

@@ -654,7 +654,7 @@ class BuildHandler implements Runnable {

         if (isInGW && isOutEnd) {
             _context.statManager().addRateData("tunnel.rejectHostile", 1);
-            _log.error("Dropping build request, IBGW+OBEP");
+            _log.error("Dropping build request, IBGW+OBEP: " + req);
             if (from != null)
                 _context.commSystem().mayDisconnect(from);
             return;
@@ -667,7 +667,7 @@ class BuildHandler implements Runnable {
             // No way to recognize if we are every other hop, but see below
             // old i2pd
             if (_log.shouldWarn())
-                _log.warn("Dropping build request, we are the next hop");
+                _log.warn("Dropping build request, we are the next hop: " + req);
             if (from != null)
                 _context.commSystem().mayDisconnect(from);
             return;
@@ -678,7 +678,7 @@ class BuildHandler implements Runnable {
             // but if not, something is seriously wrong here.
             if (from == null || _context.routerHash().equals(from)) {
                 _context.statManager().addRateData("tunnel.rejectHostile", 1);
-                _log.error("Dropping build request, we are the previous hop");
+                _log.error("Dropping build request, we are the previous hop: " + req);
                 return;
             }
         }
@@ -689,7 +689,7 @@ class BuildHandler implements Runnable {
             // i2pd does this
             _context.statManager().addRateData("tunnel.rejectHostile", 1);
             if (_log.shouldLog(Log.WARN))
-                _log.warn("Dropping build request with the same previous and next hop");
+                _log.warn("Dropping build request with the same previous and next hop: " + req);
             _context.commSystem().mayDisconnect(from);
             return;
         }
@@ -704,7 +704,7 @@ class BuildHandler implements Runnable {
         if (timeDiff > MAX_REQUEST_AGE) {
             _context.statManager().addRateData("tunnel.rejectTooOld", 1);
             if (_log.shouldLog(Log.WARN))
-                _log.warn("Dropping build request too old... replay attack? " + DataHelper.formatDuration(timeDiff));
+                _log.warn("Dropping build request too old... replay attack? " + DataHelper.formatDuration(timeDiff) + ": " + req);
             if (from != null)
                 _context.commSystem().mayDisconnect(from);
             return;
@@ -712,7 +712,7 @@ class BuildHandler implements Runnable {
         if (timeDiff < 0 - MAX_REQUEST_FUTURE) {
             _context.statManager().addRateData("tunnel.rejectFuture", 1);
             if (_log.shouldLog(Log.WARN))
-                _log.warn("Dropping build request too far in future " + DataHelper.formatDuration(0 - timeDiff));
+                _log.warn("Dropping build request too far in future " + DataHelper.formatDuration(0 - timeDiff) + ": " + req);
             if (from != null)
                 _context.commSystem().mayDisconnect(from);
             return;
@@ -791,7 +791,7 @@ class BuildHandler implements Runnable {
         if (response == 0 && !isInGW) {
             if (from != null && _throttler.shouldThrottle(from)) {
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn("Rejecting tunnel (hop throttle), previous hop: " + from);
+                    _log.warn("Rejecting tunnel (hop throttle), previous hop: " + from + ": " + req);
                 // no setTunnelStatus() indication
                 _context.statManager().addRateData("tunnel.rejectHopThrottle", 1);
                 response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
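
The _throttler.shouldThrottle(...) checks above and below rate-limit how often
any single peer may appear as the previous or next hop of requests we accept;
offenders are rejected with TUNNEL_REJECT_BANDWIDTH. One common way to build
such a throttle is a counting window; a sketch of the idea (names and window
handling are hypothetical, not the router's actual implementation):

    import java.util.HashMap;
    import java.util.Map;

    class PeerThrottle {
        private final Map<String, Integer> _counts = new HashMap<>();
        private final long _windowMs;
        private final int _max;
        private long _windowStart;

        PeerThrottle(long windowMs, int max) { _windowMs = windowMs; _max = max; }

        /** @return true if this peer has exceeded the per-window request cap */
        synchronized boolean shouldThrottle(String peer, long now) {
            if (now - _windowStart > _windowMs) {  // rotate the counting window
                _counts.clear();
                _windowStart = now;
            }
            return _counts.merge(peer, 1, Integer::sum) > _max;
        }
    }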
@@ -800,17 +800,12 @@ class BuildHandler implements Runnable {
         if (response == 0 && (!isOutEnd) &&
             _throttler.shouldThrottle(nextPeer)) {
             if (_log.shouldLog(Log.WARN))
-                _log.warn("Rejecting tunnel (hop throttle), next hop: " + nextPeer);
+                _log.warn("Rejecting tunnel (hop throttle), next hop: " + req);
             _context.statManager().addRateData("tunnel.rejectHopThrottle", 1);
             // no setTunnelStatus() indication
             response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
         }

-        if (_log.shouldLog(Log.DEBUG))
-            _log.debug("Responding to " + state.msg.getUniqueId() + "/" + ourId
-                       + " after " + recvDelay + " with " + response
-                       + " from " + (from != null ? from : "tunnel"));
-
         HopConfig cfg = null;
         if (response == 0) {
             cfg = new HopConfig();
@@ -849,7 +844,7 @@ class BuildHandler implements Runnable {
             success = _context.tunnelDispatcher().joinParticipant(cfg);
             if (success) {
                 if (_log.shouldLog(Log.DEBUG))
-                    _log.debug("Joining " + state.msg.getUniqueId() + "/" + cfg.getReceiveTunnel() + "/" + recvDelay + " as " + (isOutEnd ? "outbound endpoint" : isInGW ? "inbound gw" : "participant"));
+                    _log.debug("Joining: " + req);
             } else {
                 // Dup Tunnel ID. This can definitely happen (birthday paradox).
                 // Probability in 11 minutes (per hop type):
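
The truncated "birthday paradox" comment above refers to collisions among
randomly chosen 32-bit tunnel IDs: with n IDs live at once, the chance of a
duplicate is roughly 1 - exp(-n(n-1)/2^33). An illustrative computation (the
n value is made up; the commit's actual per-hop-type figures are cut off at
the hunk boundary):

    // Approximate probability that n random 32-bit tunnel IDs contain a dup.
    static double dupIdProbability(long n) {
        double pairs = n * (n - 1) / 2.0;             // candidate colliding pairs
        return 1.0 - Math.exp(-pairs / 4294967296.0); // 2^32 possible IDs
    }
    // e.g. dupIdProbability(10000) ~= 0.0116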
@@ -857,7 +852,7 @@ class BuildHandler implements Runnable {
                 response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
                 _context.statManager().addRateData("tunnel.rejectDupID", 1);
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn("DUP ID failure " + state.msg.getUniqueId() + "/" + cfg.getReceiveTunnel() + " as " + (isOutEnd ? "outbound endpoint" : isInGW ? "inbound gw" : "participant"));
+                    _log.warn("DUP ID failure: " + req);
             }
         }

@@ -866,10 +861,12 @@ class BuildHandler implements Runnable {
         if (response != 0) {
             _context.statManager().addRateData("tunnel.reject." + response, 1);
             _context.messageHistory().tunnelRejected(from, new TunnelId(ourId), nextPeer,
-                                                     "rejecting for " + response + ": " +
-                                                     state.msg.getUniqueId() + "/" + ourId + "/" + req.readNextTunnelId() + " delay " +
-                                                     recvDelay + " as " +
-                                                     (isOutEnd ? "outbound endpoint" : isInGW ? "inbound gw" : "participant"));
+                                                     // this is all disabled anyway
+                                                     //"rejecting for " + response + ": " +
+                                                     //state.msg.getUniqueId() + "/" + ourId + "/" + req.readNextTunnelId() + " delay " +
+                                                     //recvDelay + " as " +
+                                                     //(isOutEnd ? "outbound endpoint" : isInGW ? "inbound gw" : "participant"));
+                                                     Integer.toString(response));
             if (from != null)
                 _context.commSystem().mayDisconnect(from);
             // Connection congestion control:
@@ -881,7 +878,7 @@ class BuildHandler implements Runnable {
                 (! _context.commSystem().isEstablished(nextPeer))) {
                 _context.statManager().addRateData("tunnel.dropConnLimits", 1);
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn("Not sending rejection due to conn limits");
+                    _log.warn("Not sending rejection due to conn limits: " + req);
                 return;
             }
         } else if (isInGW && from != null) {
@@ -889,6 +886,11 @@ class BuildHandler implements Runnable {
             _context.commSystem().mayDisconnect(from);
         }

+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("Responding to " + state.msg.getUniqueId()
+                       + " after " + recvDelay + " with " + response
+                       + " from " + (from != null ? from : "tunnel") + ": " + req);
+
         EncryptedBuildRecord reply = BuildResponseRecord.create(_context, response, req.readReplyKey(), req.readReplyIV(), state.msg.getUniqueId());
         int records = state.msg.getRecordCount();
         int ourSlot = -1;
@@ -904,19 +906,16 @@ class BuildHandler implements Runnable {
         }

         if (_log.shouldLog(Log.DEBUG))
-            _log.debug("Read slot " + ourSlot + " containing our hop @ " + _context.routerHash()
-                       + " accepted? " + response + " receiving on " + ourId
-                       + " sending to " + nextId
-                       + " on " + nextPeer
-                       + " inGW? " + isInGW + " outEnd? " + isOutEnd
-                       + " recvDelay " + recvDelay + " replyMessage " + req.readReplyMessageId()
-                       + " replyKey " + req.readReplyKey() + " replyIV " + Base64.encode(req.readReplyIV()));
+            _log.debug("Read slot " + ourSlot + " containing: " + req
+                       + " accepted? " + response
+                       + " recvDelay " + recvDelay + " replyMessage " + req.readReplyMessageId());

         // now actually send the response
+        long expires = _context.clock().now() + NEXT_HOP_SEND_TIMEOUT;
         if (!isOutEnd) {
             state.msg.setUniqueId(req.readReplyMessageId());
-            state.msg.setMessageExpiration(_context.clock().now() + NEXT_HOP_SEND_TIMEOUT);
-            OutNetMessage msg = new OutNetMessage(_context, state.msg, state.msg.getMessageExpiration(), PRIORITY, nextPeerInfo);
+            state.msg.setMessageExpiration(expires);
+            OutNetMessage msg = new OutNetMessage(_context, state.msg, expires, PRIORITY, nextPeerInfo);
             if (response == 0)
                 msg.setOnFailedSendJob(new TunnelBuildNextHopFailJob(_context, cfg));
             _context.outNetMessagePool().add(msg);
@@ -932,20 +931,20 @@ class BuildHandler implements Runnable {
             for (int i = 0; i < records; i++)
                 replyMsg.setRecord(i, state.msg.getRecord(i));
             replyMsg.setUniqueId(req.readReplyMessageId());
-            replyMsg.setMessageExpiration(_context.clock().now() + NEXT_HOP_SEND_TIMEOUT);
+            replyMsg.setMessageExpiration(expires);
             TunnelGatewayMessage m = new TunnelGatewayMessage(_context);
             m.setMessage(replyMsg);
-            m.setMessageExpiration(replyMsg.getMessageExpiration());
+            m.setMessageExpiration(expires);
             m.setTunnelId(new TunnelId(nextId));
             if (_context.routerHash().equals(nextPeer)) {
                 // ok, we are the gateway, so inject it
                 if (_log.shouldLog(Log.DEBUG))
                     _log.debug("We are the reply gateway for " + nextId
-                               + " when replying to replyMessage " + req.readReplyMessageId());
+                               + " when replying to replyMessage " + req);
                 _context.tunnelDispatcher().dispatch(m);
             } else {
                 // ok, the gateway is some other peer, shove 'er across
-                OutNetMessage outMsg = new OutNetMessage(_context, m, m.getMessageExpiration(), PRIORITY, nextPeerInfo);
+                OutNetMessage outMsg = new OutNetMessage(_context, m, expires, PRIORITY, nextPeerInfo);
                 if (response == 0)
                     outMsg.setOnFailedSendJob(new TunnelBuildNextHopFailJob(_context, cfg));
                 _context.outNetMessagePool().add(outMsg);
@@ -972,10 +971,10 @@ class BuildHandler implements Runnable {
         // endpoint, receiving the request at the last hop)
         long reqId = receivedMessage.getUniqueId();
         PooledTunnelCreatorConfig cfg = _exec.removeFromBuilding(reqId);
-        if (_log.shouldLog(Log.DEBUG))
-            _log.debug("Receive tunnel build message " + reqId + " from "
-                       + (from != null ? from.calculateHash() : fromHash != null ? fromHash : "tunnels")
-                       + ", found matching tunnel? " + (cfg != null));
+        //if (_log.shouldLog(Log.DEBUG))
+        //    _log.debug("Receive tunnel build message " + reqId + " from "
+        //               + (from != null ? from.calculateHash() : fromHash != null ? fromHash : "tunnels")
+        //               + ", found matching tunnel? " + (cfg != null));
         if (cfg != null) {
             if (!cfg.isInbound()) {
                 // shouldnt happen - should we put it back?
@@ -1171,7 +1170,11 @@ class BuildHandler implements Runnable {
         public String getName() { return "Timeout contacting next peer for tunnel join"; }

         public void runJob() {
-            getContext().tunnelDispatcher().remove(_cfg);
+            // TODO
+            // This doesn't seem to be a reliable indication of actual failure,
+            // as we sometimes get subsequent tunnel messages.
+            // Until this is investigated and fixed, don't remove the tunnel.
+            //getContext().tunnelDispatcher().remove(_cfg);
             getContext().statManager().addRateData("tunnel.rejectTimeout2", 1);
             Log log = getContext().logManager().getLog(BuildHandler.class);
             if (log.shouldLog(Log.WARN))