* NetDB: Add negative lookup cache

Author: zzz
Date: 2012-11-19 16:10:02 +00:00
Parent: f64eacefe3
Commit: f15828fa95
7 changed files with 75 additions and 3 deletions

history.txt

@@ -1,3 +1,8 @@
+2012-11-19 zzz
+ * BuildHandler: Disable CoDel, wasn't helping
+ * NetDB: Add negative lookup cache
+ * Profiles: Split up files into subdirectories
+
 2012-11-17 zzz
  * error500.jsp: Add servlet version
  * i2psnark: Clear PEX peers set after use, cause of bad peer counts

router/java/src/net/i2p/router/RouterVersion.java

@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 8;
+    public final static long BUILD = 9;
     /** for example "-test" */
     public final static String EXTRA = "";

router/java/src/net/i2p/router/networkdb/kademlia/FloodThrottler.java

@@ -12,7 +12,7 @@ import net.i2p.util.SimpleTimer;
  * @since 0.7.11
  */
 class FloodThrottler {
-    private ObjectCounter<Hash> counter;
+    private final ObjectCounter<Hash> counter;
     private static final int MAX_FLOODS = 3;
     private static final long CLEAN_TIME = 60*1000;

router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java

@@ -33,6 +33,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
     private final Set<Hash> _verifiesInProgress;
     private FloodThrottler _floodThrottler;
     private LookupThrottler _lookupThrottler;
+    private NegativeLookupCache _negativeCache;

     /**
      * This is the flood redundancy. Entries are
@@ -62,6 +63,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
         _context.statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
         _context.statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies from unreliable peers we skip", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
         _context.statManager().createRateStat("netDb.republishQuantity", "How many peers we need to send a found leaseSet to", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+        _context.statManager().createRateStat("netDb.negativeCache", "Aborted lookup, already cached", "NetworkDatabase", new long[] { 60*60*1000l });
     }

     @Override
@@ -69,6 +71,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
         super.startup();
         _context.jobQueue().addJob(new FloodfillMonitorJob(_context, this));
         _lookupThrottler = new LookupThrottler();
+        _negativeCache = new NegativeLookupCache();

         // refresh old routers
         Job rrj = new RefreshRoutersJob(_context, this);
@@ -166,6 +169,25 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
         return _lookupThrottler.shouldThrottle(from, id);
     }

+    /**
+     * Increment the failure counter for this key in the negative lookup cache
+     * @since 0.9.4
+     */
+    void lookupFailed(Hash key) {
+        _negativeCache.lookupFailed(key);
+    }
+
+    /**
+     * Is the key in the negative lookup cache?
+     * @since 0.9.4
+     */
+    boolean isNegativeCached(Hash key) {
+        boolean rv = _negativeCache.isCached(key);
+        if (rv)
+            _context.statManager().addRateData("netDb.negativeCache", 1);
+        return rv;
+    }
+
     /**
      * Send to a subset of all floodfill peers.
      * We do this to implement Kademlia within the floodfills, i.e.
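
Taken together, the two new methods give search jobs a cheap pre-flight check: ask isNegativeCached() before spending tunnels on a search, and report lookupFailed() afterward so repeat lookups for a dead key get short-circuited. The sketch below restates that contract in plain Java; SearchGate and its members are hypothetical stand-ins, only the two method names mirror the facade, and IterativeSearchJob (next file) is the real caller in this commit.

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical stand-in for the facade/search-job pair, with I2P types
    // replaced by String keys. Not part of the commit.
    class SearchGate {
        private static final int MAX_FAILS = 3;  // same threshold as NegativeLookupCache
        private final Map<String, Integer> fails = new HashMap<String, Integer>();

        // mirrors FloodfillNetworkDatabaseFacade.lookupFailed(Hash)
        void lookupFailed(String key) {
            Integer n = fails.get(key);
            fails.put(key, (n == null) ? 1 : n + 1);
        }

        // mirrors FloodfillNetworkDatabaseFacade.isNegativeCached(Hash)
        boolean isNegativeCached(String key) {
            Integer n = fails.get(key);
            return n != null && n >= MAX_FAILS;
        }

        // mirrors the gate added to IterativeSearchJob.runJob() in the next file
        boolean startSearch(String key) {
            if (isNegativeCached(key))
                return false;  // abort before any tunnels or messages are spent
            // ... a real job would pick floodfill peers and send queries here ...
            return true;
        }
    }

Note that the netDb.negativeCache stat is recorded only when the check returns true, so it counts lookups aborted by the cache, not failures fed into it.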

router/java/src/net/i2p/router/networkdb/kademlia/IterativeSearchJob.java

@@ -14,6 +14,7 @@ import net.i2p.data.DataHelper;
 import net.i2p.data.Hash;
 import net.i2p.data.RouterInfo;
 import net.i2p.data.i2np.DatabaseLookupMessage;
+import net.i2p.router.CommSystemFacade;
 import net.i2p.router.Job;
 import net.i2p.router.MessageSelector;
 import net.i2p.router.OutNetMessage;
@@ -93,6 +94,12 @@ class IterativeSearchJob extends FloodSearchJob {
     @Override
     public void runJob() {
+        if (_facade.isNegativeCached(_key)) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Negative cached, not searching: " + _key);
+            failed();
+            return;
+        }
         // pick some floodfill peers and send out the searches
         List<Hash> floodfillPeers;
         KBucketSet ks = _facade.getKBuckets();
@@ -304,6 +311,8 @@ class IterativeSearchJob extends FloodSearchJob {
             _dead = true;
         }
         _facade.complete(_key);
+        if (getContext().commSystem().getReachabilityStatus() != CommSystemFacade.STATUS_DISCONNECTED)
+            _facade.lookupFailed(_key);
         getContext().messageRegistry().unregisterPending(_out);
         int tries;
         synchronized(this) {
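
One subtlety in the failed() hunk: a failure feeds the negative cache only if the router currently has connectivity. The likely intent is that a disconnected router fails every lookup, so counting those failures would negative-cache keys that are perfectly resolvable. A minimal restatement of the rule (onLookupFailed is a hypothetical name; the status check is the one above):

    // Count a failure toward negative caching only when it can plausibly be
    // blamed on the key rather than on our own lack of connectivity.
    private void onLookupFailed(Hash key) {
        int status = getContext().commSystem().getReachabilityStatus();
        if (status != CommSystemFacade.STATUS_DISCONNECTED)
            _facade.lookupFailed(key);  // eligible for negative caching
        // else: we were offline and the lookup never had a chance; learn nothing
    }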

router/java/src/net/i2p/router/networkdb/kademlia/LookupThrottler.java

@@ -16,7 +16,7 @@ import net.i2p.util.SimpleTimer;
  * @since 0.7.11
  */
 class LookupThrottler {
-    private ObjectCounter<ReplyTunnel> counter;
+    private final ObjectCounter<ReplyTunnel> counter;
     /** the id of this is -1 */
     private static final TunnelId DUMMY_ID = new TunnelId();
     /** this seems like plenty */

router/java/src/net/i2p/router/networkdb/kademlia/NegativeLookupCache.java

@@ -0,0 +1,36 @@
+package net.i2p.router.networkdb.kademlia;
+
+import net.i2p.data.Hash;
+import net.i2p.util.ObjectCounter;
+import net.i2p.util.SimpleScheduler;
+import net.i2p.util.SimpleTimer;
+
+/**
+ * Track lookup fails
+ *
+ * @since 0.9.4
+ */
+class NegativeLookupCache {
+    private final ObjectCounter<Hash> counter;
+    private static final int MAX_FAILS = 3;
+    private static final long CLEAN_TIME = 4*60*1000;
+
+    public NegativeLookupCache() {
+        this.counter = new ObjectCounter();
+        SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
+    }
+
+    public void lookupFailed(Hash h) {
+        this.counter.increment(h);
+    }
+
+    public boolean isCached(Hash h) {
+        return this.counter.count(h) >= MAX_FAILS;
+    }
+
+    private class Cleaner implements SimpleTimer.TimedEvent {
+        public void timeReached() {
+            NegativeLookupCache.this.counter.clear();
+        }
+    }
+}
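
To make the thresholds concrete, here is a hypothetical driver, not part of the commit (it would have to live in the same package, since the class is package-private); the all-zero Hash is just a dummy key:

    package net.i2p.router.networkdb.kademlia;

    import net.i2p.data.Hash;

    // Hypothetical demo: a key becomes negative cached after MAX_FAILS (3)
    // failures, and all counts are wiped every CLEAN_TIME (4 minutes).
    public class NegativeCacheDemo {
        public static void main(String[] args) {
            NegativeLookupCache cache = new NegativeLookupCache();
            Hash key = new Hash(new byte[Hash.HASH_LENGTH]);  // dummy 32-byte key

            cache.lookupFailed(key);
            cache.lookupFailed(key);
            System.out.println(cache.isCached(key));  // false: 2 fails < MAX_FAILS

            cache.lookupFailed(key);
            System.out.println(cache.isCached(key));  // true: 3rd fail reaches MAX_FAILS

            // The Cleaner clears all counts at once every 4 minutes, so a key must
            // accumulate its 3 failures within one window to be reported as cached.
        }
    }

Wiping the whole counter on a timer, rather than tracking per-entry expiry, keeps the cache allocation-free per key; FloodThrottler and LookupThrottler above use the same ObjectCounter-plus-Cleaner pattern.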