- Better handling of unsupported encryption in destinations
 - Implement handling of unsupported encryption in router identities
 - Banlist forever all RIs with unsupported encryption
 - New negative cache of all dests with unsupported encryption
 - New methods for destination lookup that will succeed even if
   the LS is expired or encryption is unsupported
 - Use the new dest lookup so the client gets the right error code
   later, rather than failing with "no leaseset" when we actually
   received the LS but just couldn't verify it.
 - Cleanups and javadocs

OCMOSJ: Detect unsupported encryption on dest and return the correct failure code
   through I2CP to streaming to i2ptunnel

Streaming: Re-enable message status override, but treat LS lookup failure
   as a soft failure for now.

HTTP Client: Add error page for unsupported encryption
Author: zzz
Date: 2014-09-05 22:52:23 +00:00
Parent: 3b2f1d35c4
Commit: 330a5ddd0f
19 changed files with 486 additions and 98 deletions
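Not part of the diff: a minimal sketch of the "unsupported encryption" check this changeset introduces, mirroring processStoreFailure() in KademliaNetworkDatabaseFacade below. It assumes a Destination pulled from a looked-up LeaseSet; the classes and calls are the same ones the diff itself imports.

import net.i2p.crypto.SigType;
import net.i2p.data.Certificate;
import net.i2p.data.DataFormatException;
import net.i2p.data.Destination;
import net.i2p.data.KeyCertificate;

/**
 * Sketch only: true if this I2P/Java version cannot verify the
 * destination's signature type, i.e. the case that now gets permanently
 * negative cached and reported as STATUS_SEND_FAILURE_UNSUPPORTED_ENCRYPTION.
 */
static boolean usesUnsupportedCrypto(Destination d) {
    Certificate c = d.getCertificate();
    if (c.getCertificateType() != Certificate.CERTIFICATE_TYPE_KEY)
        return false;  // legacy DSA-SHA1 destination, always supported
    try {
        KeyCertificate kc = c.toKeyCertificate();
        SigType type = kc.getSigType();
        return type == null || !type.isAvailable();
    } catch (DataFormatException dfe) {
        return false;  // malformed certificate; rejected elsewhere as invalid
    }
}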

View File

@@ -601,9 +601,12 @@ public abstract class I2PTunnelHTTPClientBase extends I2PTunnelClientBase implem
return;
int status = ise != null ? ise.getStatus() : -1;
String error;
//TODO MessageStatusMessage.STATUS_SEND_FAILURE_UNSUPPORTED_ENCRYPTION
if (status == MessageStatusMessage.STATUS_SEND_FAILURE_NO_LEASESET) {
// We won't get this one unless it is treated as a hard failure
// in streaming. See PacketQueue.java
error = usingWWWProxy ? "nolsp" : "nols";
} else if (status == MessageStatusMessage.STATUS_SEND_FAILURE_UNSUPPORTED_ENCRYPTION) {
error = usingWWWProxy ? "encp" : "enc";
} else {
error = usingWWWProxy ? "dnfp" : "dnf";
}

View File

@@ -44,7 +44,7 @@ class PacketQueue implements SendMessageStatusListener {
private static final int FINAL_TAGS_TO_SEND = 4;
private static final int FINAL_TAG_THRESHOLD = 2;
private static final long REMOVE_EXPIRED_TIME = 67*1000;
private static final boolean ENABLE_STATUS_LISTEN = false;
private static final boolean ENABLE_STATUS_LISTEN = true;
public PacketQueue(I2PAppContext context, I2PSession session, ConnectionManager mgr) {
_context = context;
@@ -267,6 +267,20 @@ class PacketQueue implements SendMessageStatusListener {
_messageStatusMap.remove(id);
break;
case MessageStatusMessage.STATUS_SEND_FAILURE_NO_LEASESET:
// Ideally we would like to make this a hard failure,
// but it caused far too many fast-fails that were then
// resolved by the user clicking reload in his browser.
// Until the LS fetch is faster and more reliable,
// or we increase the timeout for it,
// we can't treat this one as a hard fail.
// Let the streaming retransmission paper over the problem.
if (_log.shouldLog(Log.WARN))
_log.warn("LS lookup (soft) failure for msg " + msgId + " on " + con);
_messageStatusMap.remove(id);
break;
case MessageStatusMessage.STATUS_SEND_FAILURE_LOCAL:
case MessageStatusMessage.STATUS_SEND_FAILURE_ROUTER:
case MessageStatusMessage.STATUS_SEND_FAILURE_NETWORK:
@@ -280,7 +294,6 @@ class PacketQueue implements SendMessageStatusListener {
case MessageStatusMessage.STATUS_SEND_FAILURE_DESTINATION:
case MessageStatusMessage.STATUS_SEND_FAILURE_BAD_LEASESET:
case MessageStatusMessage.STATUS_SEND_FAILURE_EXPIRED_LEASESET:
case MessageStatusMessage.STATUS_SEND_FAILURE_NO_LEASESET:
case SendMessageStatusListener.STATUS_CANCELLED:
if (con.getHighestAckedThrough() >= 0) {
// a retxed SYN succeeded before the first SYN failed
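For orientation, a hedged sketch of the listener split this file re-enables: UNSUPPORTED_ENCRYPTION is surfaced as a hard failure while NO_LEASESET stays soft. The messageStatus() signature follows PacketQueue's usage but is not shown in this commit, so treat the wiring as illustrative only.

import net.i2p.client.I2PSession;
import net.i2p.client.SendMessageStatusListener;
import net.i2p.data.i2cp.MessageStatusMessage;

// Illustrative only; the real PacketQueue handler also tracks per-message state.
class StatusSketch implements SendMessageStatusListener {
    public void messageStatus(I2PSession session, long msgId, int status) {
        switch (status) {
        case MessageStatusMessage.STATUS_SEND_FAILURE_UNSUPPORTED_ENCRYPTION:
            // hard failure: close the connection so the error reaches i2ptunnel
            break;
        case MessageStatusMessage.STATUS_SEND_FAILURE_NO_LEASESET:
            // soft failure for now: let streaming retransmission paper over it
            break;
        default:
            // other statuses unchanged by this commit
            break;
        }
    }
}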

View File

@@ -0,0 +1,24 @@
HTTP/1.1 504 Gateway Timeout
Content-Type: text/html; charset=UTF-8
Cache-control: no-cache
Connection: close
Proxy-Connection: close
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head>
<title>_("Warning: Eepsite Unreachable")</title>
<link rel="shortcut icon" href="http://proxy.i2p/themes/console/images/favicon.ico">
<link href="http://proxy.i2p/themes/console/default/console.css" rel="stylesheet" type="text/css">
</head>
<body>
<div class="logo">
<a href="http://127.0.0.1:7657/" title="_("Router Console")"><img src="http://proxy.i2p/themes/console/images/i2plogo.png" alt="_("I2P Router Console")" border="0"></a><hr>
<a href="http://127.0.0.1:7657/config.jsp">_("Configuration")</a> <a href="http://127.0.0.1:7657/help.jsp">_("Help")</a> <a href="http://127.0.0.1:7657/susidns/index">_("Addressbook")</a>
</div>
<div class="warning" id="warning">
<h3>_("Warning: Eepsite Unreachable")</h3>
<p>
_("The eepsite was not reachable, because it uses encryption options that are not supported by your I2P or Java version.")
<hr>
<p><b>_("Could not connect to the following destination:")</b>
</p>

View File

@@ -0,0 +1,25 @@
HTTP/1.1 504 Gateway Timeout
Content-Type: text/html; charset=UTF-8
Cache-control: no-cache
Connection: close
Proxy-Connection: close
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head>
<title>_("Warning: Outproxy Unreachable")</title>
<link rel="shortcut icon" href="http://proxy.i2p/themes/console/images/favicon.ico">
<link href="http://proxy.i2p/themes/console/default/console.css" rel="stylesheet" type="text/css">
</head>
<body>
<div class="logo">
<a href="http://127.0.0.1:7657/" title="_("Router Console")"><img src="http://proxy.i2p/themes/console/images/i2plogo.png" alt="_("I2P Router Console")" border="0"></a><hr>
<a href="http://127.0.0.1:7657/config.jsp">_("Configuration")</a> <a href="http://127.0.0.1:7657/help.jsp">_("Help")</a> <a href="http://127.0.0.1:7657/susidns/index">_("Addressbook")</a>
</div>
<div class="warning" id="warning">
<h3>_("Warning: Outproxy Unreachable")</h3>
<p>
_("The HTTP outproxy was not reachable, because it uses encryption options that are not supported by your I2P or Java version.")
_("You may want to {0}retry{1} as this will randomly reselect an outproxy from the pool you have defined {2}here{3} (if you have more than one configured).", "<a href=\"javascript:parent.window.location.reload()\">", "</a>", "<a href=\"http://127.0.0.1:7657/i2ptunnel/index.jsp\">", "</a>")
_("If you continue to have trouble you may want to edit your outproxy list {0}here{1}.", "<a href=\"http://127.0.0.1:7657/i2ptunnel/edit.jsp?tunnel=0\">", "</a>")
</p>
<hr><p><b>_("Could not connect to the following destination:")</b></p>

View File

@@ -14,6 +14,7 @@ import java.util.Collections;
import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.router.RouterInfo;
@@ -51,18 +52,51 @@ public abstract class NetworkDatabaseFacade implements Service {
public abstract LeaseSet lookupLeaseSetLocally(Hash key);
public abstract void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs);
public abstract RouterInfo lookupRouterInfoLocally(Hash key);
/**
* return the leaseSet if another leaseSet already existed at that key
* Lookup using the client's tunnels
* Succeeds even if LS validation fails due to unsupported sig type
*
* @param fromLocalDest use these tunnels for the lookup, or null for exploratory
* @since 0.9.16
*/
public abstract void lookupDestination(Hash key, Job onFinishedJob, long timeoutMs, Hash fromLocalDest);
/**
* Lookup locally in netDB and in badDest cache
* Succeeds even if LS validation failed due to unsupported sig type
*
* @since 0.9.16
*/
public abstract Destination lookupDestinationLocally(Hash key);
/**
* @return the leaseSet if another leaseSet already existed at that key
*
* @throws IllegalArgumentException if the data is not valid
*/
public abstract LeaseSet store(Hash key, LeaseSet leaseSet) throws IllegalArgumentException;
/**
* return the routerInfo if another router already existed at that key
* @return the routerInfo if another router already existed at that key
*
* @throws IllegalArgumentException if the data is not valid
*/
public abstract RouterInfo store(Hash key, RouterInfo routerInfo) throws IllegalArgumentException;
/**
* @return the old entry if it already existed at that key
* @throws IllegalArgumentException if the data is not valid
* @since 0.9.16
*/
public DatabaseEntry store(Hash key, DatabaseEntry entry) throws IllegalArgumentException {
if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
return store(key, (RouterInfo) entry);
if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
return store(key, (LeaseSet) entry);
throw new IllegalArgumentException("unknown type");
}
/**
* @throws IllegalArgumentException if the local router is not valid
*/
@@ -101,4 +135,12 @@ public abstract class NetworkDatabaseFacade implements Service {
* @since IPv6
*/
public boolean floodfillEnabled() { return false; };
/**
* Is it permanently negative cached?
*
* @param key only for Destinations; for RouterIdentities, see Banlist
* @since 0.9.16
*/
public boolean isNegativeCachedForever(Hash key) { return false; }
}
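A hedged usage sketch for the new lookupDestination() contract: there is no separate failure job, so the single finished job re-checks lookupDestinationLocally(), exactly as LookupDestJob does in a later file. ctx, key and the 15-second timeout are placeholders, not values taken from the commit.

Job done = new JobImpl(ctx) {
    public String getName() { return "Dest lookup finished"; }
    public void runJob() {
        Destination dest = ctx.netDb().lookupDestinationLocally(key);
        if (dest != null) {
            // success, even if the LS was expired or used an unsupported sig type
        } else {
            // lookup failed
        }
    }
};
ctx.netDb().lookupDestination(key, done, 15*1000, null);  // null = exploratory tunnels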

View File

@@ -70,9 +70,8 @@ public class PersistentKeyRing extends KeyRing {
Hash h = e.getKey();
buf.append(h.toBase64().substring(0, 6)).append("&hellip;");
buf.append("<td>");
LeaseSet ls = _ctx.netDb().lookupLeaseSetLocally(h);
if (ls != null) {
Destination dest = ls.getDestination();
Destination dest = _ctx.netDb().lookupDestinationLocally(h);
if (dest != null) {
if (_ctx.clientManager().isLocal(dest)) {
TunnelPoolSettings in = _ctx.tunnelManager().getInboundSettings(h);
if (in != null && in.getDestinationNickname() != null)

View File

@@ -38,7 +38,11 @@ class LookupDestJob extends JobImpl {
}
/**
* One of h or name non-null
* One of h or name non-null.
*
* For hash or b32 name, the dest will be returned if the LS can be found,
* even if the dest uses unsupported crypto.
*
* @param reqID must be >= 0 if name != null
* @param sessID must be non-null if reqID >= 0
* @param fromLocalDest use these tunnels for the lookup, or null for exploratory
@@ -88,7 +92,7 @@ class LookupDestJob extends JobImpl {
returnFail();
} else {
DoneJob done = new DoneJob(getContext());
getContext().netDb().lookupLeaseSet(_hash, done, done, _timeout, _fromLocalDest);
getContext().netDb().lookupDestination(_hash, done, _timeout, _fromLocalDest);
}
}
@@ -98,9 +102,9 @@ class LookupDestJob extends JobImpl {
}
public String getName() { return "LeaseSet Lookup Reply to Client"; }
public void runJob() {
LeaseSet ls = getContext().netDb().lookupLeaseSetLocally(_hash);
if (ls != null)
returnDest(ls.getDestination());
Destination dest = getContext().netDb().lookupDestinationLocally(_hash);
if (dest != null)
returnDest(dest);
else
returnFail();
}

View File

@@ -15,6 +15,7 @@ import java.util.Map;
import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.router.RouterInfo;
@@ -23,8 +24,8 @@ import net.i2p.router.NetworkDatabaseFacade;
import net.i2p.router.RouterContext;
public class DummyNetworkDatabaseFacade extends NetworkDatabaseFacade {
private Map<Hash, RouterInfo> _routers;
private RouterContext _context;
private final Map<Hash, RouterInfo> _routers;
private final RouterContext _context;
public DummyNetworkDatabaseFacade(RouterContext ctx) {
_routers = Collections.synchronizedMap(new HashMap<Hash, RouterInfo>());
@@ -42,6 +43,11 @@ public class DummyNetworkDatabaseFacade extends NetworkDatabaseFacade {
public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {}
public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, Hash fromLocalDest) {}
public LeaseSet lookupLeaseSetLocally(Hash key) { return null; }
public void lookupDestination(Hash key, Job onFinishedJob, long timeoutMs, Hash fromLocalDest) {}
public Destination lookupDestinationLocally(Hash key) { return null; }
public void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {
RouterInfo info = lookupRouterInfoLocally(key);
if (info == null)
@@ -50,13 +56,16 @@ public class DummyNetworkDatabaseFacade extends NetworkDatabaseFacade {
_context.jobQueue().addJob(onFindJob);
}
public RouterInfo lookupRouterInfoLocally(Hash key) { return _routers.get(key); }
public void publish(LeaseSet localLeaseSet) {}
public void publish(RouterInfo localRouterInfo) {}
public LeaseSet store(Hash key, LeaseSet leaseSet) { return leaseSet; }
public RouterInfo store(Hash key, RouterInfo routerInfo) {
RouterInfo rv = _routers.put(key, routerInfo);
return rv;
}
public void unpublish(LeaseSet localLeaseSet) {}
public void fail(Hash dbEntry) {
_routers.remove(dbEntry);

View File

@@ -425,12 +425,19 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
getContext().statManager().addRateData("client.leaseSetFailedRemoteTime", lookupTime);
}
//if (_finished == Result.NONE) {
int cause;
if (getContext().netDb().isNegativeCachedForever(_to.calculateHash())) {
if (_log.shouldLog(Log.WARN))
_log.warn("Unable to send to " + _toString + " because the sig type is unsupported");
cause = MessageStatusMessage.STATUS_SEND_FAILURE_UNSUPPORTED_ENCRYPTION;
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Unable to send to " + _toString + " because we couldn't find their leaseSet");
//}
cause = MessageStatusMessage.STATUS_SEND_FAILURE_NO_LEASESET;
}
dieFatal(MessageStatusMessage.STATUS_SEND_FAILURE_NO_LEASESET);
dieFatal(cause);
}
}

View File

@@ -2,10 +2,10 @@ package net.i2p.router.networkdb.kademlia;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.LeaseSet;
import net.i2p.data.router.RouterInfo;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.router.RouterInfo;
import net.i2p.router.JobImpl;
import net.i2p.router.ReplyJob;
import net.i2p.router.RouterContext;
@@ -62,6 +62,9 @@ class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob {
} else {
getContext().netDb().store(dsm.getKey(), (RouterInfo) dsm.getEntry());
}
} catch (UnsupportedCryptoException uce) {
_search.failed();
return;
} catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.WARN))
_log.warn(_search.getJobId() + ": Received an invalid store reply", iae);

View File

@@ -7,11 +7,12 @@ import java.util.Map;
import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.router.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.router.RouterInfo;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
@@ -31,7 +32,6 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
private final Set<Hash> _verifiesInProgress;
private FloodThrottler _floodThrottler;
private LookupThrottler _lookupThrottler;
private NegativeLookupCache _negativeCache;
/**
* This is the flood redundancy. Entries are
@@ -65,7 +65,6 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
_context.statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.republishQuantity", "How many peers do we need to send a found leaseSet to?", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.negativeCache", "Aborted lookup, already cached", "NetworkDatabase", new long[] { 60*60*1000l });
}
@Override
@@ -73,7 +72,6 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
super.startup();
_context.jobQueue().addJob(new FloodfillMonitorJob(_context, this));
_lookupThrottler = new LookupThrottler();
_negativeCache = new NegativeLookupCache();
// refresh old routers
Job rrj = new RefreshRoutersJob(_context, this);
@@ -171,25 +169,6 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
return _lookupThrottler.shouldThrottle(from, id);
}
/**
* Increment in the negative lookup cache
* @since 0.9.4
*/
void lookupFailed(Hash key) {
_negativeCache.lookupFailed(key);
}
/**
* Is the key in the negative lookup cache?
* @since 0.9.4
*/
boolean isNegativeCached(Hash key) {
boolean rv = _negativeCache.isCached(key);
if (rv)
_context.statManager().addRateData("netDb.negativeCache", 1);
return rv;
}
/**
* Send to a subset of all floodfill peers.
* We do this to implement Kademlia within the floodfills, i.e.
@@ -301,7 +280,9 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
}
/**
* Lookup using exploratory tunnels
* Lookup using exploratory tunnels.
*
* Caller should check negative cache and/or banlist before calling.
*
* Begin a kademlia style search for the key specified, which can take up to timeoutMs and
* will fire the appropriate jobs on success or timeout (or if the kademlia search completes
@@ -315,7 +296,10 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
}
/**
* Lookup using the client's tunnels
* Lookup using the client's tunnels.
*
* Caller should check negative cache and/or banlist before calling.
*
* @param fromLocalDest use these tunnels for the lookup, or null for exploratory
* @return null always
* @since 0.9.10
@@ -473,6 +457,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
// should we skip the search?
if (_floodfillEnabled ||
_context.jobQueue().getMaxLag() > 500 ||
_context.banlist().isBanlistedForever(peer) ||
getKBucketSetSize() > MAX_DB_BEFORE_SKIPPING_SEARCH) {
// don't try to overload ourselves (e.g. failing 3000 router refs at
// once, and then firing off 3000 netDb lookup tasks)

View File

@@ -6,6 +6,7 @@ import java.util.Set;
import net.i2p.data.Certificate;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.router.RouterInfo;
@@ -173,9 +174,9 @@ class FloodfillVerifyStoreJob extends JobImpl {
FloodfillPeerSelector sel = (FloodfillPeerSelector)_facade.getPeerSelector();
Certificate keyCert = null;
if (!_isRouterInfo) {
LeaseSet ls = _facade.lookupLeaseSetLocally(_key);
if (ls != null) {
Certificate cert = ls.getDestination().getCertificate();
Destination dest = _facade.lookupDestinationLocally(_key);
if (dest != null) {
Certificate cert = dest.getCertificate();
if (cert.getCertificateType() == Certificate.CERTIFICATE_TYPE_KEY)
keyCert = cert;
}

View File

@@ -51,6 +51,8 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
long recvBegin = System.currentTimeMillis();
String invalidMessage = null;
// set if invalid store but not his fault
boolean dontBlamePeer = false;
boolean wasNew = false;
RouterInfo prevNetDb = null;
Hash key = _message.getKey();
@@ -72,6 +74,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
if (getContext().clientManager().isLocal(key)) {
//getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
// throw rather than return, so that we send the ack below (prevent easy attack)
dontBlamePeer = true;
throw new IllegalArgumentException("Peer attempted to store local leaseSet: " +
key.toBase64().substring(0, 4));
}
@@ -114,6 +117,9 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
//if (!ls.getReceivedAsReply())
// match.setReceivedAsPublished(true);
}
} catch (UnsupportedCryptoException uce) {
invalidMessage = uce.getMessage();
dontBlamePeer = true;
} catch (IllegalArgumentException iae) {
invalidMessage = iae.getMessage();
}
@@ -131,8 +137,10 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
if (getContext().routerHash().equals(key)) {
//getContext().statManager().addRateData("netDb.storeLocalRouterInfoAttempt", 1, 0);
// throw rather than return, so that we send the ack below (prevent easy attack)
dontBlamePeer = true;
throw new IllegalArgumentException("Peer attempted to store our RouterInfo");
}
getContext().profileManager().heardAbout(key);
prevNetDb = getContext().netDb().store(key, ri);
wasNew = ((null == prevNetDb) || (prevNetDb.getPublished() < ri.getPublished()));
// Check new routerinfo address against blocklist
@@ -152,7 +160,9 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
_log.warn("New address received, Blocklisting old peer " + key + ' ' + ri);
}
}
getContext().profileManager().heardAbout(key);
} catch (UnsupportedCryptoException uce) {
invalidMessage = uce.getMessage();
dontBlamePeer = true;
} catch (IllegalArgumentException iae) {
invalidMessage = iae.getMessage();
}
@@ -165,6 +175,8 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
long recvEnd = System.currentTimeMillis();
getContext().statManager().addRateData("netDb.storeRecvTime", recvEnd-recvBegin);
// ack even if invalid or unsupported
// TODO any cases where we shouldn't?
if (_message.getReplyToken() > 0)
sendAck();
long ackEnd = System.currentTimeMillis();
@@ -172,7 +184,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
if (_from != null)
_fromHash = _from.getHash();
if (_fromHash != null) {
if (invalidMessage == null) {
if (invalidMessage == null || dontBlamePeer) {
getContext().profileManager().dbStoreReceived(_fromHash, wasNew);
getContext().statManager().addRateData("netDb.storeHandled", ackEnd-recvEnd);
} else {
@@ -180,7 +192,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
if (_log.shouldLog(Log.WARN))
_log.warn("Peer " + _fromHash.toBase64() + " sent bad data: " + invalidMessage);
}
} else if (invalidMessage != null) {
} else if (invalidMessage != null && !dontBlamePeer) {
if (_log.shouldLog(Log.WARN))
_log.warn("Unknown peer sent bad data: " + invalidMessage);
}

View File

@@ -19,14 +19,20 @@ import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import net.i2p.crypto.SigType;
import net.i2p.data.Certificate;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.KeyCertificate;
import net.i2p.data.LeaseSet;
import net.i2p.data.router.RouterAddress;
import net.i2p.data.router.RouterInfo;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.router.RouterAddress;
import net.i2p.data.router.RouterIdentity;
import net.i2p.data.router.RouterInfo;
import net.i2p.kademlia.KBucketSet;
import net.i2p.kademlia.RejectTrimmer;
import net.i2p.kademlia.SelectionCollector;
@@ -63,6 +69,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
protected final RouterContext _context;
private final ReseedChecker _reseedChecker;
private volatile long _lastRIPublishTime;
private NegativeLookupCache _negativeCache;
/**
* Map of Hash to RepublishLeaseSetJob for leases we're already managing.
@@ -155,6 +162,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_reseedChecker = new ReseedChecker(context);
context.statManager().createRateStat("netDb.lookupDeferred", "how many lookups are deferred?", "NetworkDatabase", new long[] { 60*60*1000 });
context.statManager().createRateStat("netDb.exploreKeySet", "how many keys are queued for exploration?", "NetworkDatabase", new long[] { 60*60*1000 });
context.statManager().createRateStat("netDb.negativeCache", "Aborted lookup, already cached", "NetworkDatabase", new long[] { 60*60*1000l });
// following are for StoreJob
context.statManager().createRateStat("netDb.storeRouterInfoSent", "How many routerInfo store messages have we sent?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.storeLeaseSetSent", "How many leaseSet store messages have we sent?", "NetworkDatabase", new long[] { 60*60*1000l });
@@ -223,6 +231,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
//_ds = null;
_exploreKeys.clear(); // hope this doesn't cause an explosion, it shouldn't.
// _exploreKeys = null;
_negativeCache.clear();
}
public synchronized void restart() {
@@ -262,6 +271,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
//_ds = new TransientDataStore();
// _exploreKeys = new HashSet(64);
_dbDir = dbDir;
_negativeCache = new NegativeLookupCache();
createHandlers();
@@ -480,7 +490,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
/**
* Lookup using exploratory tunnels
* Lookup using exploratory tunnels.
* Use lookupDestination() if you don't need the LS or need it validated.
*/
public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {
lookupLeaseSet(key, onFindJob, onFailedLookupJob, timeoutMs, null);
@@ -488,6 +499,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
/**
* Lookup using the client's tunnels
* Use lookupDestination() if you don't need the LS or need it validated.
*
* @param fromLocalDest use these tunnels for the lookup, or null for exploratory
* @since 0.9.10
*/
@@ -500,6 +513,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_log.debug("leaseSet found locally, firing " + onFindJob);
if (onFindJob != null)
_context.jobQueue().addJob(onFindJob);
} else if (isNegativeCached(key)) {
if (_log.shouldLog(Log.WARN))
_log.warn("Negative cached, not searching: " + key);
if (onFailedLookupJob != null)
_context.jobQueue().addJob(onFailedLookupJob);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("leaseSet not found locally, running search");
@@ -509,6 +527,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_log.debug("after lookupLeaseSet");
}
/**
* Use lookupDestination() if you don't need the LS or need it validated.
*/
public LeaseSet lookupLeaseSetLocally(Hash key) {
if (!_initialized) return null;
DatabaseEntry ds = _ds.get(key);
@@ -532,12 +553,56 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
}
/**
* Lookup using the client's tunnels
* Succeeds even if LS validation and store fails due to unsupported sig type, expired, etc.
*
* Note that there are not separate success and fail jobs. Caller must call
* lookupDestinationLocally() in the job to determine success.
*
* @param onFinishedJob non-null
* @param fromLocalDest use these tunnels for the lookup, or null for exploratory
* @since 0.9.16
*/
public void lookupDestination(Hash key, Job onFinishedJob, long timeoutMs, Hash fromLocalDest) {
if (!_initialized) return;
Destination d = lookupDestinationLocally(key);
if (d != null) {
_context.jobQueue().addJob(onFinishedJob);
} else {
search(key, onFinishedJob, onFinishedJob, timeoutMs, true, fromLocalDest);
}
}
/**
* Lookup locally in netDB and in badDest cache
* Succeeds even if LS validation fails due to unsupported sig type, expired, etc.
*
* @since 0.9.16
*/
public Destination lookupDestinationLocally(Hash key) {
if (!_initialized) return null;
DatabaseEntry ds = _ds.get(key);
if (ds != null) {
if (ds.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
LeaseSet ls = (LeaseSet)ds;
return ls.getDestination();
}
} else {
return _negativeCache.getBadDest(key);
}
return null;
}
public void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {
if (!_initialized) return;
RouterInfo ri = lookupRouterInfoLocally(key);
if (ri != null) {
if (onFindJob != null)
_context.jobQueue().addJob(onFindJob);
} else if (_context.banlist().isBanlistedForever(key)) {
if (onFailedLookupJob != null)
_context.jobQueue().addJob(onFailedLookupJob);
} else {
search(key, onFindJob, onFailedLookupJob, timeoutMs, false);
}
@@ -694,9 +759,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
* Unlike for RouterInfos, this is only called once, when stored.
* After that, LeaseSet.isCurrent() is used.
*
* @throws UnsupportedCryptoException if that's why it failed.
* @return reason why the entry is not valid, or null if it is valid
*/
private String validate(Hash key, LeaseSet leaseSet) {
private String validate(Hash key, LeaseSet leaseSet) throws UnsupportedCryptoException {
if (!key.equals(leaseSet.getDestination().calculateHash())) {
if (_log.shouldLog(Log.WARN))
_log.warn("Invalid store attempt! key does not match leaseSet.destination! key = "
@@ -704,9 +770,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
return "Key does not match leaseSet.destination - " + key.toBase64();
}
if (!leaseSet.verifySignature()) {
// throws UnsupportedCryptoException
processStoreFailure(key, leaseSet);
if (_log.shouldLog(Log.WARN))
_log.warn("Invalid leaseSet signature! leaseSet = " + leaseSet);
return "Invalid leaseSet signature on " + leaseSet.getDestination().calculateHash().toBase64();
_log.warn("Invalid leaseSet signature! " + leaseSet);
return "Invalid leaseSet signature on " + key;
}
long earliest = leaseSet.getEarliestLeaseDate();
long latest = leaseSet.getLatestLeaseDate();
@@ -722,7 +790,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
+ " first exp. " + new Date(earliest)
+ " last exp. " + new Date(latest),
new Exception("Rejecting store"));
return "Expired leaseSet for " + leaseSet.getDestination().calculateHash().toBase64()
return "Expired leaseSet for " + leaseSet.getDestination().calculateHash()
+ " expired " + DataHelper.formatDuration(age) + " ago";
}
if (latest > now + (Router.CLOCK_FUDGE_FACTOR + MAX_LEASE_FUTURE)) {
@@ -739,9 +807,13 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
/**
* Store the leaseSet
* Store the leaseSet.
*
* If the store fails due to unsupported crypto, it will negative cache
* the hash until restart.
*
* @throws IllegalArgumentException if the leaseSet is not valid
* @throws UnsupportedCryptoException if that's why it failed.
* @return previous entry or null
*/
public LeaseSet store(Hash key, LeaseSet leaseSet) throws IllegalArgumentException {
@@ -798,6 +870,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
*
* Call this only on first store, to check the key and signature once
*
* If the store fails due to unsupported crypto, it will banlist
* the router hash until restart and then throw UnsupportedCryptoException.
*
* @throws UnsupportedCryptoException if that's why it failed.
* @return reason why the entry is not valid, or null if it is valid
*/
private String validate(Hash key, RouterInfo routerInfo) throws IllegalArgumentException {
@@ -807,6 +883,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
return "Key does not match routerInfo.identity";
}
if (!routerInfo.isValid()) {
// throws UnsupportedCryptoException
processStoreFailure(key, routerInfo);
if (_log.shouldLog(Log.WARN))
_log.warn("Invalid routerInfo signature! forged router structure! router = " + routerInfo);
return "Invalid routerInfo signature";
@@ -892,15 +970,29 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
/**
* store the routerInfo
* Store the routerInfo.
*
* If the store fails due to unsupported crypto, it will banlist
* the router hash until restart and then throw UnsupportedCryptoException.
*
* @throws IllegalArgumentException if the routerInfo is not valid
* @throws UnsupportedCryptoException if that's why it failed.
* @return previous entry or null
*/
public RouterInfo store(Hash key, RouterInfo routerInfo) throws IllegalArgumentException {
return store(key, routerInfo, true);
}
/**
* Store the routerInfo.
*
* If the store fails due to unsupported crypto, it will banlist
* the router hash until restart and then throw UnsupportedCryptoException.
*
* @throws IllegalArgumentException if the routerInfo is not valid
* @throws UnsupportedCryptoException if that's why it failed.
* @return previous entry or null
*/
RouterInfo store(Hash key, RouterInfo routerInfo, boolean persist) throws IllegalArgumentException {
if (!_initialized) return null;
@@ -935,6 +1027,59 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
return rv;
}
/**
* If the validate fails, call this
* to determine if it was because of unsupported crypto.
*
* If so, this will banlist-forever the router hash or permanently negative cache the dest hash,
* and then throw the exception. Otherwise it does nothing.
*
* @throws UnsupportedCryptoException if that's why it failed.
* @since 0.9.16
*/
private void processStoreFailure(Hash h, DatabaseEntry entry) throws UnsupportedCryptoException {
if (entry.getHash().equals(h)) {
if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
LeaseSet ls = (LeaseSet) entry;
Destination d = ls.getDestination();
Certificate c = d.getCertificate();
if (c.getCertificateType() == Certificate.CERTIFICATE_TYPE_KEY) {
try {
KeyCertificate kc = c.toKeyCertificate();
SigType type = kc.getSigType();
if (type == null || !type.isAvailable()) {
failPermanently(d);
String stype = (type != null) ? type.toString() : Integer.toString(kc.getSigTypeCode());
if (_log.shouldLog(Log.WARN))
_log.warn("Unsupported sig type " + stype + " for destination " + h);
throw new UnsupportedCryptoException("Sig type " + stype);
}
} catch (DataFormatException dfe) {}
}
} else if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
RouterInfo ri = (RouterInfo) entry;
RouterIdentity id = ri.getIdentity();
Certificate c = id.getCertificate();
if (c.getCertificateType() == Certificate.CERTIFICATE_TYPE_KEY) {
try {
KeyCertificate kc = c.toKeyCertificate();
SigType type = kc.getSigType();
if (type == null || !type.isAvailable()) {
String stype = (type != null) ? type.toString() : Integer.toString(kc.getSigTypeCode());
_context.banlist().banlistRouterForever(h, "Unsupported signature type " + stype);
if (_log.shouldLog(Log.WARN))
_log.warn("Unsupported sig type " + stype + " for router " + h);
throw new UnsupportedCryptoException("Sig type " + stype);
}
} catch (DataFormatException dfe) {}
}
}
}
if (_log.shouldLog(Log.WARN))
_log.warn("Verify fail, cause unknown: " + entry);
}
/**
* Final remove for a leaseset.
* For a router info, will look up in the network before dropping.
@@ -1005,8 +1150,12 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
* without any match)
*
* Unused - called only by FNDF.searchFull() from FloodSearchJob which is overridden - don't use this.
*
* @throws UnsupportedOperationException always
*/
SearchJob search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
throw new UnsupportedOperationException();
/****
if (!_initialized) return null;
boolean isNew = true;
SearchJob searchJob = null;
@@ -1031,6 +1180,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_context.statManager().addRateData("netDb.lookupDeferred", deferred, searchJob.getExpiration()-_context.clock().now());
}
return searchJob;
****/
}
/**
@@ -1102,6 +1252,47 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_context.jobQueue().addJob(new StoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
}
/**
* Increment in the negative lookup cache
*
* @param key for Destinations or RouterIdentities
* @since 0.9.4 moved from FNDF to KNDF in 0.9.16
*/
void lookupFailed(Hash key) {
_negativeCache.lookupFailed(key);
}
/**
* Is the key in the negative lookup cache?
*
* @param key for Destinations or RouterIdentities
* @since 0.9.4 moved from FNDF to KNDF in 0.9.16
*/
boolean isNegativeCached(Hash key) {
boolean rv = _negativeCache.isCached(key);
if (rv)
_context.statManager().addRateData("netDb.negativeCache", 1);
return rv;
}
/**
* Negative cache until restart
* @since 0.9.16
*/
void failPermanently(Destination dest) {
_negativeCache.failPermanently(dest);
}
/**
* Is it permanently negative cached?
*
* @param key only for Destinations; for RouterIdentities, see Banlist
* @since 0.9.16
*/
public boolean isNegativeCachedForever(Hash key) {
return _negativeCache.getBadDest(key) != null;
}
/**
* Debug info, HTML formatted
* @since 0.9.10

View File

@@ -1,6 +1,9 @@
package net.i2p.router.networkdb.kademlia;
import java.util.Map;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.util.LHMCache;
import net.i2p.util.ObjectCounter;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;
@@ -12,11 +15,15 @@ import net.i2p.util.SimpleTimer;
*/
class NegativeLookupCache {
private final ObjectCounter<Hash> counter;
private final Map<Hash, Destination> badDests;
private static final int MAX_FAILS = 3;
private static final int MAX_BAD_DESTS = 128;
private static final long CLEAN_TIME = 2*60*1000;
public NegativeLookupCache() {
this.counter = new ObjectCounter<Hash>();
this.badDests = new LHMCache<Hash, Destination>(MAX_BAD_DESTS);
SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
}
@@ -25,7 +32,46 @@ class NegativeLookupCache {
}
public boolean isCached(Hash h) {
return this.counter.count(h) >= MAX_FAILS;
if (counter.count(h) >= MAX_FAILS)
return true;
synchronized(badDests) {
return badDests.get(h) != null;
}
}
/**
* Negative cache the hash until restart,
* but cache the destination.
*
* @since 0.9.16
*/
public void failPermanently(Destination dest) {
Hash h = dest.calculateHash();
synchronized(badDests) {
badDests.put(h, dest);
}
}
/**
* Get an unsupported but cached Destination
*
* @return dest or null if not cached
* @since 0.9.16
*/
public Destination getBadDest(Hash h) {
synchronized(badDests) {
return badDests.get(h);
}
}
/**
* @since 0.9.16
*/
public void clear() {
counter.clear();
synchronized(badDests) {
badDests.clear();
}
}
private class Cleaner implements SimpleTimer.TimedEvent {
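A hedged sketch of the two tiers this cache now provides; the method names are from the hunk above, while hash, dest and the surrounding wiring are illustrative placeholders.

NegativeLookupCache cache = new NegativeLookupCache();

// Tier 1, transient: MAX_FAILS (3) failed lookups make isCached() return
// true until the periodic Cleaner resets the counter.
cache.lookupFailed(hash);

// Tier 2, until restart: unsupported sig types. The Destination itself is
// kept so lookupDestinationLocally() can still hand it back to the client.
cache.failPermanently(dest);
Destination bad = cache.getBadDest(dest.calculateHash());  // non-null
boolean blocked = cache.isCached(dest.calculateHash());    // true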

View File

@@ -202,22 +202,29 @@ class SearchJob extends JobImpl {
_log.debug(getJobId() + ": Already completed");
return;
}
if (_state.isAborted()) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Search aborted");
_state.complete();
fail();
return;
}
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Searching: " + _state);
if (isLocal()) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Key found locally");
_state.complete(true);
_state.complete();
succeed();
} else if (isExpired()) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Key search expired");
_state.complete(true);
_state.complete();
fail();
} else if (_state.getAttempted().size() > MAX_PEERS_QUERIED) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Too many peers queried");
_state.complete(true);
_state.complete();
fail();
} else {
//_log.debug("Continuing search");

View File

@@ -19,15 +19,16 @@ import net.i2p.router.RouterContext;
*/
class SearchState {
private final RouterContext _context;
private final HashSet<Hash> _pendingPeers;
private final Set<Hash> _pendingPeers;
private final Map<Hash, Long> _pendingPeerTimes;
private final HashSet<Hash> _attemptedPeers;
private final HashSet<Hash> _failedPeers;
private final HashSet<Hash> _successfulPeers;
private final HashSet<Hash> _repliedPeers;
private final Set<Hash> _attemptedPeers;
private final Set<Hash> _failedPeers;
private final Set<Hash> _successfulPeers;
private final Set<Hash> _repliedPeers;
private final Hash _searchKey;
private volatile long _completed;
private volatile long _started;
private volatile boolean _aborted;
public SearchState(RouterContext context, Hash key) {
_context = context;
@@ -87,12 +88,21 @@ class SearchState {
return new HashSet<Hash>(_failedPeers);
}
}
public boolean completed() { return _completed != -1; }
public void complete(boolean completed) {
if (completed)
public void complete() {
_completed = _context.clock().now();
}
/** @since 0.9.16 */
public boolean isAborted() { return _aborted; }
/** @since 0.9.16 */
public void abort() {
_aborted = true;
}
public long getWhenStarted() { return _started; }
public long getWhenCompleted() { return _completed; }
@@ -177,6 +187,8 @@ class SearchState {
buf.append(" completed? false ");
else
buf.append(" completed on ").append(new Date(_completed));
if (_aborted)
buf.append(" (Aborted)");
buf.append("\n\tAttempted: ");
synchronized (_attemptedPeers) {
buf.append(_attemptedPeers.size()).append(' ');

View File

@@ -81,35 +81,22 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
if (message instanceof DatabaseStoreMessage) {
long timeToReply = _state.dataFound(_peer);
DatabaseStoreMessage msg = (DatabaseStoreMessage)message;
DatabaseEntry entry = msg.getEntry();
if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
try {
_facade.store(msg.getKey(), (LeaseSet) entry);
_facade.store(msg.getKey(), entry);
getContext().profileManager().dbLookupSuccessful(_peer, timeToReply);
} catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.ERROR))
_log.warn("Peer " + _peer + " sent us an invalid leaseSet: " + iae.getMessage());
getContext().profileManager().dbLookupReply(_peer, 0, 0, 1, 0, timeToReply);
}
} else if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": dbStore received on search containing router "
+ msg.getKey() + " with publishDate of "
+ new Date(entry.getDate()));
try {
_facade.store(msg.getKey(), (RouterInfo) entry);
} catch (UnsupportedCryptoException iae) {
// don't blame the peer
getContext().profileManager().dbLookupSuccessful(_peer, timeToReply);
_state.abort();
// searchNext() will call fail()
} catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.ERROR))
_log.warn("Peer " + _peer + " sent us an invalid routerInfo: " + iae.getMessage());
if (_log.shouldLog(Log.WARN))
_log.warn("Peer " + _peer + " sent us invalid data: ", iae);
// blame the peer
getContext().profileManager().dbLookupReply(_peer, 0, 0, 1, 0, timeToReply);
}
} else {
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": Unknown db store type?!@ " + entry.getType());
}
} else if (message instanceof DatabaseSearchReplyMessage) {
_job.replyFound((DatabaseSearchReplyMessage)message, _peer);
} else {

View File

@@ -0,0 +1,18 @@
package net.i2p.router.networkdb.kademlia;
/**
* Signature verification failed because the
* sig type is unknown or unavailable.
*
* @since 0.9.16
*/
public class UnsupportedCryptoException extends IllegalArgumentException {
public UnsupportedCryptoException(String msg) {
super(msg);
}
public UnsupportedCryptoException(String msg, Throwable t) {
super(msg, t);
}
}
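Because UnsupportedCryptoException extends IllegalArgumentException, callers that want the "don't blame the peer" behavior must catch it before the broader type, as HandleFloodfillDatabaseStoreMessageJob and SearchUpdateReplyFoundJob do above. A minimal sketch of that pattern; the store call and handling here are illustrative:

try {
    getContext().netDb().store(key, leaseSet);
} catch (UnsupportedCryptoException uce) {
    // unsupported sig type: the hash is now negative cached until restart,
    // and the sender is not penalized
} catch (IllegalArgumentException iae) {
    // genuinely invalid store; blame the peer as before
}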