forked from I2P_Developers/i2p.i2p
Compare commits
29 Commits
i2p-2.5.2-
...
i2p.i2p.2.
SHA1
564e2101e7
5c5cd8e909
5b8f0d41f1
d1d1ee157f
fc447453f0
386b25c3b6
bda12fb627
678c3db2f2
42c83194e2
6f8460607a
fa7b313134
e26754f9ed
e6c74fb494
8805bf0944
c2e8a9716e
e912f20ba6
61abf49f3d
646d2623bf
12c1c9459d
f23ac402ba
13bcb7fcd5
f6c6fb8bbb
9eac400261
14f4323889
cc5e3e94d0
4cbaad5e50
80cc0bb1ce
d3e71f6f70
9de1dd46e1
DatabaseEntry.java

@@ -263,12 +263,20 @@ public abstract class DatabaseEntry extends DataStructureImpl {
      *
      * @since 0.9.58 moved up from LeaseSet
      */
-    public boolean getReceivedAsPublished() { return _receivedAsPublished; }
+    public boolean getReceivedAsPublished() {
+        return _receivedAsPublished;
+    }
 
     /**
      * @since 0.9.58 moved up from LeaseSet
+     *
+     * use this carefully, when updating the flags make sure the old and new
+     * leaseSet are actually equivalent, or simply copy over the reply value,
+     * see KademliaNetworkDatabaseFacade.java line 997 for more information.
      */
-    public void setReceivedAsPublished(boolean received) { _receivedAsPublished = received; }
+    public void setReceivedAsPublished() {
+        _receivedAsPublished = true;
+    }
 
     /**
      * If true, we received this LeaseSet by searching for it
@@ -276,12 +284,16 @@ public abstract class DatabaseEntry extends DataStructureImpl {
      *
      * @since 0.7.14, moved up from LeaseSet in 0.9.58
      */
-    public boolean getReceivedAsReply() { return _receivedAsReply; }
+    public boolean getReceivedAsReply() {
+        return _receivedAsReply;
+    }
 
     /**
      * set to true
      *
      * @since 0.7.14, moved up from LeaseSet in 0.9.58
      */
-    public void setReceivedAsReply() { _receivedAsReply = true; }
+    public void setReceivedAsReply() {
+        _receivedAsReply = true;
+    }
 }
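The two hunks above also narrow the flag API: setReceivedAsPublished(boolean) becomes a no-argument, one-way setter, so callers can mark an entry as received-as-published but can no longer clear the flag. A minimal sketch of the intended call pattern, condensed from the HandleFloodfillDatabaseStoreMessageJob hunk further down (the class and method names here are illustrative only):

```java
import net.i2p.data.LeaseSet;

// Sketch only: assumes the no-argument setter introduced in the hunks above.
// A stored LeaseSet is marked "received as published" unless it already
// arrived as a reply to one of our own lookups; the flag is one-way.
class ReceivedFlagSketch {
    static void markStored(LeaseSet ls) {
        if (!ls.getReceivedAsReply())
            ls.setReceivedAsPublished();
    }
}
```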
Changelog

@@ -1,3 +1,11 @@
+2023-06-28 idk
+ * Cache stores of multihomed leaseSets when stored from multihome peers,
+   and if our local leaseSet is not in the keyspace return the multihome instead
+ * When updating a leaseSet because it was received as published, always make a
+   complete copy of the leaseSet before merging the flags.
+ * Rate-Limit lookups
+ * I2P 2.3.0
+
 2023-05-29 idk
  * adds "virtual contexts" to bloom filter, where each entity that
    passes an i2np message to the bloom filter xor's the messageID with a random, local value.
ClientMessagePool.java

@@ -31,6 +31,10 @@ public class ClientMessagePool {
         _cache = new OutboundCache(_context);
         OutboundClientMessageOneShotJob.init(_context);
     }
 
+    public OutboundCache getCache() {
+        return _cache;
+    }
+
     /**
      * @since 0.8.8
OutboundCache.java

@@ -95,6 +95,15 @@ public class OutboundCache {
      */
     final Map<HashPair, Long> lastReplyRequestCache = new ConcurrentHashMap<HashPair, Long>(64);
 
+
+    /**
+     * This cache is used to keep track of when we receive a leaseSet from a router
+     * we are multihomed with, or otherwise are asked to store a valid routerInfo for
+     * a destination which we also host.
+     */
+
+    public final ConcurrentHashMap<Hash, LeaseSet> multihomedCache = new ConcurrentHashMap<Hash, LeaseSet>(64);
+
     private final RouterContext _context;
 
     private static final int CLEAN_INTERVAL = 5*60*1000;
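The new multihomedCache maps a destination Hash to the freshest LeaseSet that a multihome peer has stored to us. The store-message hunk further down only replaces a cached entry when the incoming leaseSet carries a later earliest lease date; a hedged sketch of that rule (the class and method names are illustrative, the calls are the ones used in the diff):

```java
import java.util.concurrent.ConcurrentHashMap;

import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;

// Sketch only: the "keep whichever leaseSet expires later" rule applied when a
// multihome peer stores a leaseSet for a destination we also host locally.
class MultihomeCacheSketch {
    final ConcurrentHashMap<Hash, LeaseSet> multihomedCache =
        new ConcurrentHashMap<Hash, LeaseSet>(64);

    void cacheIfNewer(Hash key, LeaseSet ls) {
        LeaseSet existing = multihomedCache.get(key);
        if (existing == null || existing.getEarliestLeaseDate() < ls.getEarliestLeaseDate())
            multihomedCache.put(key, ls);
    }
}
```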
HandleDatabaseLookupMessageJob.java

@@ -147,14 +147,40 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                 // Only send it out if it is in our estimated keyspace.
                 // For this, we do NOT use their dontInclude list as it can't be trusted
                 // (i.e. it could mess up the closeness calculation)
+                LeaseSet possibleMultihomed = getContext().clientMessagePool().getCache().multihomedCache.get(searchKey);
                 Set<Hash> closestHashes = getContext().netDb().findNearestRouters(searchKey,
                                                                                   CLOSENESS_THRESHOLD, null);
                 if (weAreClosest(closestHashes)) {
                     // It's in our keyspace, so give it to them
+                    // there is a slight chance that there is also a multihomed router in our cache at the
+                    // same time we are closest to our locally published leaseSet. That means there is a slight
+                    // chance an attacker can send a leaseSet as a store which goes into the multihome cache, then
+                    // fetch back a locally-created, locally-published leaseset. BUT, if we always publish a
+                    // multihomed leaseset even if we are closest to the local, we never send it out if a potential
+                    // multihome is found in the cache.
                     if (_log.shouldLog(Log.INFO))
                         _log.info("We have local LS " + searchKey + ", answering query, in our keyspace");
                     getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1);
                     sendData(searchKey, ls, fromKey, toTunnel);
+                } else if (possibleMultihomed != null) {
+                    if (possibleMultihomed.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4)) {
+                        // If it's in the possibleMultihomed cache, then it was definitely stored to us meaning it is effectively
+                        // always receivedAsPublished. No need to decide whether or not to answer the request like above, just
+                        // answer it so it doesn't look different from other stores.
+                        if (_log.shouldLog(Log.INFO))
+                            _log.info("We have local LS, possibly from a multihomed router " + searchKey + ", and somebody requested it back from us. Answering query, as if in our keyspace, to avoid attack.");
+                        getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
+                        sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
+                    } else {
+                        // if it expired, remove it from the cache.
+                        getContext().clientMessagePool().getCache().multihomedCache.remove(searchKey);
+                        // Lie, pretend we don't have it
+                        if (_log.shouldLog(Log.INFO))
+                            _log.info("We have local LS " + searchKey + ", NOT answering query, out of our keyspace");
+                        getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1);
+                        Set<Hash> routerHashSet = getNearestRouters(lookupType);
+                        sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
+                    }
                 } else {
                     // Lie, pretend we don't have it
                     if (_log.shouldLog(Log.INFO))
@@ -164,17 +190,44 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                     sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
                 }
             } else {
-                // It was not published to us (we looked it up, for example)
-                // or it's local and we aren't floodfill,
-                // or it's local and we don't publish it.
-                // Lie, pretend we don't have it
-                if (_log.shouldLog(Log.INFO))
-                    _log.info("We have LS " + searchKey +
-                              ", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
-                              " RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
-                getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
-                Set<Hash> routerHashSet = getNearestRouters(lookupType);
-                sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
+                LeaseSet possibleMultihomed = getContext().clientMessagePool().getCache().multihomedCache.get(searchKey);
+                if (possibleMultihomed != null) {
+                    if (possibleMultihomed.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4)) {
+                        // If it's in the possibleMultihomed cache, then it was definitely stored to us meaning it is effectively
+                        // always receivedAsPublished. No need to decide whether or not to answer the request like above, just
+                        // answer it so it doesn't look different from other stores.
+                        if (_log.shouldLog(Log.INFO))
+                            _log.info("We have local LS, possibly from a multihomed router " + searchKey + ", and somebody requested it back from us. Answering query, as if in our keyspace, to avoid attack.");
+                        getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
+                        sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
+                    } else {
+                        // if it expired, remove it from the cache.
+                        getContext().clientMessagePool().getCache().multihomedCache.remove(searchKey);
+                        // It was not published to us (we looked it up, for example)
+                        // or it's local and we aren't floodfill,
+                        // or it's local and we don't publish it.
+                        // Lie, pretend we don't have it
+                        if (_log.shouldLog(Log.INFO))
+                            _log.info("We have LS " + searchKey +
+                                      ", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
+                                      " RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
+                        getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
+                        Set<Hash> routerHashSet = getNearestRouters(lookupType);
+                        sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
+                    }
+                } else {
+                    // It was not published to us (we looked it up, for example)
+                    // or it's local and we aren't floodfill,
+                    // or it's local and we don't publish it.
+                    // Lie, pretend we don't have it
+                    if (_log.shouldLog(Log.INFO))
+                        _log.info("We have LS " + searchKey +
+                                  ", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
+                                  " RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
+                    getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
+                    Set<Hash> routerHashSet = getNearestRouters(lookupType);
+                    sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
+                }
             }
         } else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO &&
                    lookupType != DatabaseLookupMessage.Type.LS) {
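Read together, the two hunks above define one answer policy for a lookup on a leaseSet we hold locally: answer with our own copy when we would publish it and are among the closest floodfills, otherwise fall back to a current multihome-cache entry, and only then pretend we do not have it. A hedged condensation follows; the abstract helpers are placeholders for the sendData(), sendClosest() and multihomedCache calls shown in the diff, not real methods:

```java
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.router.Router;

// Sketch only: decision order for a leaseSet lookup on a locally hosted key.
abstract class LocalLookupPolicySketch {
    abstract void answerWith(Hash searchKey, LeaseSet ls);   // ~ sendData(...)
    abstract void answerWithClosestPeers(Hash searchKey);    // ~ sendClosest(...)
    abstract void evictMultihome(Hash searchKey);            // ~ multihomedCache.remove(...)

    // answerableLocally: we would publish this leaseSet and are among the closest floodfills
    void handle(Hash searchKey, LeaseSet localLs, LeaseSet cachedMultihome, boolean answerableLocally) {
        if (answerableLocally) {
            // In our keyspace: answer with our own leaseSet.
            answerWith(searchKey, localLs);
        } else if (cachedMultihome != null && cachedMultihome.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4)) {
            // A current multihome entry was definitely stored to us, so answer with it
            // so the reply looks no different from any other store.
            answerWith(searchKey, cachedMultihome);
        } else {
            if (cachedMultihome != null)
                evictMultihome(searchKey);  // expired cache entry
            // Lie, pretend we don't have it: return closest peers only.
            answerWithClosestPeers(searchKey);
        }
    }
}
```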
FloodfillDatabaseLookupMessageHandler.java

@@ -48,14 +48,39 @@ public class FloodfillDatabaseLookupMessageHandler implements HandlerJobBuilder
         _context.statManager().addRateData("netDb.lookupsReceived", 1);
 
         DatabaseLookupMessage dlm = (DatabaseLookupMessage)receivedMessage;
-        if (!_facade.shouldThrottleLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
+
+        if (_facade.shouldBanLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
+            if (_log.shouldLog(Log.WARN)) {
+                _log.warn("Possibly throttling " + dlm.getSearchType() + " lookup request for " + dlm.getSearchKey() + " because requests are being sent extremely fast, reply was to: " + dlm.getFrom() + " tunnel: " + dlm.getReplyTunnel());
+                _context.statManager().addRateData("netDb.repeatedLookupsDropped", 1);
+            }
+            /*
+             * We don't do this yet, but we do ban routers who do much faster bursts of lookups
+             * _context.banlist().banlistRouter(dlm.getFrom(), " <b>➜</b> Excessive lookup requests", null, null, _context.clock().now() + 4*60*60*1000);
+             * _context.commSystem().mayDisconnect(dlm.getFrom());
+             * _context.statManager().addRateData("netDb.lookupsDropped", 1);
+             * return null;
+             */
+        }
+        if (_facade.shouldBanBurstLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
+            if (_log.shouldLog(Log.WARN)) {
+                _log.warn("Banning " + dlm.getSearchType() + " lookup request for " + dlm.getSearchKey() + " because requests are being sent extremely fast in a very short time, reply was to: " + dlm.getFrom() + " tunnel: " + dlm.getReplyTunnel());
+                _context.statManager().addRateData("netDb.repeatedBurstLookupsDropped", 1);
+            }
+            _context.banlist().banlistRouter(dlm.getFrom(), " <b>➜</b> Excessive lookup requests, burst", null, null, _context.clock().now() + 4*60*60*1000);
+            _context.commSystem().mayDisconnect(dlm.getFrom());
+            _context.statManager().addRateData("netDb.lookupsDropped", 1);
+            return null;
+        }
+        if ((!_facade.shouldThrottleLookup(dlm.getFrom(), dlm.getReplyTunnel()) && !_facade.shouldThrottleBurstLookup(dlm.getFrom(), dlm.getReplyTunnel()))
+            || _context.routerHash().equals(dlm.getFrom())) {
             Job j = new HandleFloodfillDatabaseLookupMessageJob(_context, dlm, from, fromHash, _msgIDBloomXor);
             //if (false) {
             //    // might as well inline it, all the heavy lifting is queued up in later jobs, if necessary
             //    j.runJob();
             //    return null;
             //} else {
                 return j;
             //}
         } else {
             if (_log.shouldLog(Log.WARN))
FloodfillNetworkDatabaseFacade.java

@@ -39,7 +39,16 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
     private final Set<Hash> _verifiesInProgress;
     private FloodThrottler _floodThrottler;
     private LookupThrottler _lookupThrottler;
+    private LookupThrottler _lookupThrottlerBurst;
+    private LookupThrottler _lookupBanner;
+    private LookupThrottler _lookupBannerBurst;
     private final Job _ffMonitor;
+    private final int BAN_LOOKUP_BASE = 50;
+    private final int BAN_LOOKUP_BASE_INTERVAL = 5*60*1000;
+    private final int BAN_LOOKUP_BURST = 10;
+    private final int BAN_LOOKUP_BURST_INTERVAL = 15*1000;
+    private final int DROP_LOOKUP_BURST = 10;
+    private final int DROP_LOOKUP_BURST_INTERVAL = 30*1000;
 
     /**
      * This is the flood redundancy. Entries are
@@ -84,6 +93,9 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
         super.startup();
         _context.jobQueue().addJob(_ffMonitor);
         _lookupThrottler = new LookupThrottler();
+        _lookupBanner = new LookupThrottler(BAN_LOOKUP_BASE, BAN_LOOKUP_BASE_INTERVAL);
+        _lookupThrottlerBurst = new LookupThrottler(DROP_LOOKUP_BURST, DROP_LOOKUP_BURST_INTERVAL);
+        _lookupBannerBurst = new LookupThrottler(BAN_LOOKUP_BURST, BAN_LOOKUP_BURST_INTERVAL);
 
         boolean isFF = _context.getBooleanProperty(FloodfillMonitorJob.PROP_FLOODFILL_PARTICIPANT);
         long down = _context.router().getEstimatedDowntime();
@@ -180,14 +192,38 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
         // of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
         // perhaps statistically adjust this so we are the source every 1/N times... or something.
         if (floodfillEnabled() && (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
-            flood(ds);
-            if (onSuccess != null)
-                _context.jobQueue().addJob(onSuccess);
+            //if (!chanceOfFloodingOurOwn(-1)) {
+                flood(ds);
+                if (onSuccess != null)
+                    _context.jobQueue().addJob(onSuccess);
+            //} else {
+            //    _context.jobQueue().addJob(new FloodfillStoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
+            //} Less sure I should do this this time around. TODO: figure out how this should adjust
         } else {
             _context.jobQueue().addJob(new FloodfillStoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
         }
     }
 
+    /* TODO: figure out how this should work
+    private boolean chanceOfFloodingOurOwn(int percent) {
+        if (percent < 0) {
+            // make percent equal to 1-peer.failedLookupRate by retrieving it from the stats
+            RateStat percentRate = _context.statManager().getRate("netDb.failedLookupRate");
+            if (percentRate != null)
+                percent = (1-(int)percentRate.getLifetimeAverageValue())*100;
+            else {
+                _log.warn("chanceOfFloodingOurOwn() could not find netDb.failedLookupRate");
+                return false;
+            }
+        }
+        // if the router has been up for at least an hour
+        if (_context.router().getUptime() > 60*60*1000) {
+            // then 30% of the time return true
+            return Math.random() < (percent / 100.0f);
+        }
+        return false;
+    }*/
+
     /**
      * Increments and tests.
      * @since 0.7.11
@@ -205,6 +241,21 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
         return _lookupThrottler == null || _lookupThrottler.shouldThrottle(from, id);
     }
 
+    boolean shouldBanLookup(Hash from, TunnelId id) {
+        // null before startup
+        return _lookupBanner == null || _lookupBanner.shouldThrottle(from, id);
+    }
+
+    boolean shouldThrottleBurstLookup(Hash from, TunnelId id) {
+        // null before startup
+        return _lookupThrottler == null || _lookupThrottlerBurst.shouldThrottle(from, id);
+    }
+
+    boolean shouldBanBurstLookup(Hash from, TunnelId id) {
+        // null before startup
+        return _lookupBanner == null || _lookupBannerBurst.shouldThrottle(from, id);
+    }
+
     /**
      * If we are floodfill AND the key is not throttled,
      * flood it, otherwise don't.
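Reading the new constants, the startup() wiring, and the FloodfillDatabaseLookupMessageHandler hunk together gives four separate lookup budgets. A hedged summary in code form (the class name is illustrative, LookupThrottler is package-private, and the two-argument constructor is the one added in the LookupThrottler hunk at the end of this compare; the comments note what the handler does when each budget trips):

```java
// Sketch only: mirrors the startup() wiring above.
class LookupBudgetsSketch {
    // Drop tier: when either of these trips, the lookup job is simply not queued.
    private final LookupThrottler lookupThrottler      = new LookupThrottler();             // default: 20 lookups per 3 minutes
    private final LookupThrottler lookupThrottlerBurst = new LookupThrottler(10, 30*1000);  // 10 lookups per 30 seconds

    // Ban tier: the base banner currently only logs and counts a stat (the banlist
    // call is commented out); the burst banner banlists the peer for 4 hours,
    // disconnects it, and drops the lookup.
    private final LookupThrottler lookupBanner      = new LookupThrottler(50, 5*60*1000);   // 50 lookups per 5 minutes
    private final LookupThrottler lookupBannerBurst = new LookupThrottler(10, 15*1000);     // 10 lookups per 15 seconds
}
```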
HandleFloodfillDatabaseStoreMessageJob.java

@@ -31,6 +31,7 @@ import net.i2p.router.OutNetMessage;
 import net.i2p.router.Router;
 import net.i2p.router.RouterContext;
 import net.i2p.router.TunnelInfo;
+import net.i2p.router.message.OutboundCache;
 import net.i2p.router.message.SendMessageDirectJob;
 import net.i2p.util.Log;
 import net.i2p.util.SystemVersion;
@@ -90,14 +91,24 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
             // somebody has our keys...
             // This could happen with multihoming - where it's really important to prevent
             // storing the other guy's leaseset, it will confuse us badly.
+            LeaseSet ls = (LeaseSet) entry;
             if (getContext().clientManager().isLocal(key)) {
-                //getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
+                getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
                 // throw rather than return, so that we send the ack below (prevent easy attack)
                 dontBlamePeer = true;
+                // store the peer in the outboundCache instead so that we can reply back with it without confusing ourselves.
+                if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4)) {
+                    if (_facade.validate(key, ls) == null) {
+                        LeaseSet compareLeasesetDate = getContext().clientMessagePool().getCache().multihomedCache.get(key);
+                        if (compareLeasesetDate == null)
+                            getContext().clientMessagePool().getCache().multihomedCache.put(key, ls);
+                        else if (compareLeasesetDate.getEarliestLeaseDate() < ls.getEarliestLeaseDate())
+                            getContext().clientMessagePool().getCache().multihomedCache.put(key, ls);
+                    }
+                }
                 throw new IllegalArgumentException("Peer attempted to store local leaseSet: " +
                                                    key.toBase32());
             }
-            LeaseSet ls = (LeaseSet) entry;
             //boolean oldrar = ls.getReceivedAsReply();
             //boolean oldrap = ls.getReceivedAsPublished();
             // If this was received as a response to a query,
@@ -109,7 +120,7 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
             // receive in response to our own lookups.
             // See ../HDLMJ for more info
             if (!ls.getReceivedAsReply())
-                ls.setReceivedAsPublished(true);
+                ls.setReceivedAsPublished();
             //boolean rap = ls.getReceivedAsPublished();
             //if (_log.shouldLog(Log.INFO))
             //    _log.info("oldrap? " + oldrap + " oldrar? " + oldrar + " newrap? " + rap);
@@ -162,9 +173,9 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
             if (_message.getReceivedAsReply()) {
                 ri.setReceivedAsReply();
                 if (_message.getReplyToken() > 0)
-                    ri.setReceivedAsPublished(true);
+                    ri.setReceivedAsPublished();
             } else {
-                ri.setReceivedAsPublished(true);
+                ri.setReceivedAsPublished();
             }
         }
         if (_log.shouldInfo()) {
KademliaNetworkDatabaseFacade.java

@@ -889,7 +889,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
      * @throws UnsupportedCryptoException if that's why it failed.
      * @return reason why the entry is not valid, or null if it is valid
      */
-    private String validate(Hash key, LeaseSet leaseSet) throws UnsupportedCryptoException {
+    public String validate(Hash key, LeaseSet leaseSet) throws UnsupportedCryptoException {
         if (!key.equals(leaseSet.getHash())) {
             if (_log.shouldLog(Log.WARN))
                 _log.warn("Invalid store attempt! key does not match leaseSet.destination! key = "
@@ -981,18 +981,31 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
             if (rv != null && rv.getEarliestLeaseDate() >= leaseSet.getEarliestLeaseDate()) {
                 if (_log.shouldDebug())
                     _log.debug("Not storing older " + key);
-                // if it hasn't changed, no need to do anything
-                // except copy over the flags
-                Hash to = leaseSet.getReceivedBy();
-                if (to != null) {
-                    rv.setReceivedBy(to);
-                } else if (leaseSet.getReceivedAsReply()) {
-                    rv.setReceivedAsReply();
-                }
-                if (leaseSet.getReceivedAsPublished()) {
-                    rv.setReceivedAsPublished(true);
-                }
-                return rv;
+                // TODO: Determine if this deep equals is actually truly necessary as part of this test or if the date is actually enough
+                if (rv.equals(leaseSet)) {
+                    if (_log.shouldDebug())
+                        _log.debug("Updating leaseSet found in Datastore " + key);
+                    /** - DatabaseEntry.java note
+                     * we used to just copy the flags here but due to concerns about crafted
+                     * entries being used to "follow" a leaseSet from one context to another,
+                     * i.e. sent to a client vs sent to a router. Copying the entire leaseSet,
+                     * flags and all, limits the ability of the attacker to craft leaseSet entries
+                     * maliciously.
+                     */
+                    _ds.put(key, leaseSet);
+                    rv = (LeaseSet)_ds.get(key);
+                    Hash to = leaseSet.getReceivedBy();
+                    if (to != null) {
+                        rv.setReceivedBy(to);
+                    } else if (leaseSet.getReceivedAsReply()) {
+                        rv.setReceivedAsReply();
+                    }
+                    if (leaseSet.getReceivedAsPublished()) {
+                        rv.setReceivedAsPublished();
+                    }
+                    return rv;
+                }// TODO: Is there any reason to do anything here, if the fields are somehow unequal?
+                // Like, is there any case where this is not true? I don't think it's possible for it to be.
             }
         } catch (ClassCastException cce) {
             throw new IllegalArgumentException("Attempt to replace RI with " + leaseSet);
LookupThrottler.java

@@ -19,11 +19,18 @@ class LookupThrottler {
     private final ObjectCounter<ReplyTunnel> counter;
     /** the id of this is -1 */
     private static final TunnelId DUMMY_ID = new TunnelId();
-    /** this seems like plenty */
-    private static final int MAX_LOOKUPS = 30;
-    private static final long CLEAN_TIME = 3*60*1000;
+    /** 30 seems like plenty, possibly too many, maybe dial this down again next release(2.4.0)*/
+    private final int MAX_LOOKUPS; // DEFAULT=20
+    private final long CLEAN_TIME; // DEFAULT=3*60*1000
 
     LookupThrottler() {
+        MAX_LOOKUPS = 20;
+        CLEAN_TIME = 3*60*1000;
+        this.counter = new ObjectCounter<ReplyTunnel>();
+        SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
+    }
+    LookupThrottler(int maxlookups, long cleanTime) {
+        MAX_LOOKUPS = maxlookups;
+        CLEAN_TIME = cleanTime;
         this.counter = new ObjectCounter<ReplyTunnel>();
         SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
     }
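With the second constructor, a caller can size a throttler explicitly instead of relying on the compiled-in default. A small hedged usage sketch (the class and method names are illustrative; shouldThrottle(Hash, TunnelId) is the existing entry point the facade calls above):

```java
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;

// Sketch only: a burst throttler allowing 10 lookups per (source, reply tunnel)
// pair in each 30-second window; the no-arg constructor keeps the 20-per-3-minute default.
class BurstThrottleSketch {
    private final LookupThrottler burstThrottler = new LookupThrottler(10, 30*1000);

    boolean shouldDrop(Hash from, TunnelId replyTunnel) {
        // increments the counter for this (from, replyTunnel) pair and reports
        // whether the budget for the current window has been exceeded
        return burstThrottler.shouldThrottle(from, replyTunnel);
    }
}
```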