Compare commits

...

5 Commits

Author SHA1 Message Date
b80505f3ae Router: bypass throttle when lookup sent to client DB 2023-10-18 12:28:56 -04:00
00e3390437 Router: reverse changes to RepublishLeaseSetJob 2023-10-17 12:33:53 -04:00
c67ff1376a Router: fix broken null check in shouldThrottleBurstLookup 2023-10-17 12:30:40 -04:00
2af65243a4 Router: remove 'Confirm Succeess' from ISJ, closes #455 2023-10-16 19:38:52 -04:00
idk de2b122f7b Merge branch 'i2p.i2p.2.4.0-reverse-api-change' into 'master' 2023-10-16 23:31:07 +00:00

    Router: Reverse API change from banlistRouterHard back to BanlistRouterForever

    See merge request i2p-hackers/i2p.i2p!131
3 changed files with 13 additions and 17 deletions


@@ -252,13 +252,23 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
      * @since 0.7.11
      */
     boolean shouldThrottleLookup(Hash from, TunnelId id) {
+        if (isClientDb()) {
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("Lookup for:" + from + " sent to a client DB: "+_dbid.toString()+", bypassing throttle");
+            return false;
+        }
         // null before startup
         return _lookupThrottler == null || _lookupThrottler.shouldThrottle(from, id);
     }

     boolean shouldThrottleBurstLookup(Hash from, TunnelId id) {
+        if (isClientDb()) {
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("Lookup for:" + from + " sent to a client DB: "+_dbid.toString()+", bypassing throttle");
+            return false;
+        }
         // null before startup
-        return _lookupThrottler == null || _lookupThrottlerBurst.shouldThrottle(from, id);
+        return _lookupThrottlerBurst == null || _lookupThrottlerBurst.shouldThrottle(from, id);
     }

     /**

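For context on the null-check fix above (commit c67ff1376a): the old return in shouldThrottleBurstLookup() null-checked _lookupThrottler but then dereferenced _lookupThrottlerBurst, so if _lookupThrottlerBurst were ever null while _lookupThrottler was not, the lookup would throw a NullPointerException instead of being throttled. A minimal standalone sketch of the guard pattern, using a hypothetical Throttler interface rather than the router's actual classes:

    // Sketch only: a hypothetical Throttler interface stands in for the router's
    // lookup-throttle objects; both fields are null until startup, as in the router.
    class ThrottleGuardSketch {
        interface Throttler { boolean shouldThrottle(String from, long id); }

        private Throttler _lookupThrottler;       // set at startup
        private Throttler _lookupThrottlerBurst;  // set at startup

        // Old form: guards on one field, dereferences the other. If only
        // _lookupThrottlerBurst is still null, this throws a NullPointerException.
        boolean burstCheckOld(String from, long id) {
            return _lookupThrottler == null || _lookupThrottlerBurst.shouldThrottle(from, id);
        }

        // Fixed form: the null guard and the dereference use the same field,
        // matching the corrected return in shouldThrottleBurstLookup().
        boolean burstCheckFixed(String from, long id) {
            return _lookupThrottlerBurst == null || _lookupThrottlerBurst.shouldThrottle(from, id);
        }
    }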

@@ -702,7 +702,6 @@ public class IterativeSearchJob extends FloodSearchJob {
         // we will credit the wrong one.
         int tries;
         Hash peer = null;
-        Destination dest = null;
         synchronized(this) {
             if (_dead) return;
@@ -715,14 +714,6 @@
             }
         }
-        // Confirm success by checking for the Lease Set in local storage
-        if (_isLease) {
-            dest = getContext().netDb().lookupDestinationLocally(_key);
-            if ((dest == null) && (_log.shouldLog(Log.WARN)))
-                _log.warn("Warning! Lease Set not found in persistent data store for key = " + _key);
-        }
         _facade.complete(_key);
         if (peer != null) {
             Long timeSent = _sentTime.get(peer);

@@ -44,7 +44,7 @@ class RepublishLeaseSetJob extends JobImpl {
         try {
             if (getContext().clientManager().isLocal(_dest)) {
-                LeaseSet ls = getContext().clientNetDb(_dest).lookupLeaseSetLocally(_dest);
+                LeaseSet ls = _facade.lookupLeaseSetLocally(_dest);
                 if (ls != null) {
                     if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
                         if (_log.shouldLog(Log.WARN))
@@ -101,12 +101,7 @@
         public void runJob() {
             // Don't requeue if there's a newer LS, KNDF will have already done that
-            LeaseSet ls = null;
-            if (_dest != null)
-                ls = getContext().clientNetDb(_dest).lookupLeaseSetLocally(_ls.getHash());
-            else
-                getContext().netDb().lookupLeaseSetLocally(_ls.getHash());
-            // ^ _dest should never be null here, right? So maybe instead we return immediately?
+            LeaseSet ls = _facade.lookupLeaseSetLocally(_ls.getHash());
             if (ls != null && ls.getEarliestLeaseDate() == _ls.getEarliestLeaseDate()) {
                 requeueRepublish();
             } else {
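
A note on the runJob() block removed above (commit 00e3390437): in the old else branch the lookup result was never assigned to ls, so with _dest null the code always fell through to the republish path, and the inline comment already questioned whether that branch was reachable. The replacement does a single lookup through the _facade the job already holds. A compressed sketch of that control-flow difference, with hypothetical stand-in names rather than the router API:

    // Sketch only: Object stands in for LeaseSet; lookup() stands in for
    // lookupLeaseSetLocally() on whichever database is consulted.
    class RepublishCheckSketch {
        static Object lookup(Object key) { return null; }

        // Old shape: the else branch discards its result, so ls stays null
        // and the caller behaves as if no newer lease set exists.
        static Object oldShape(Object dest, Object key) {
            Object ls = null;
            if (dest != null)
                ls = lookup(key);
            else
                lookup(key);   // result discarded; ls remains null
            return ls;
        }

        // New shape: one unconditional lookup via the facade the job holds.
        static Object newShape(Object key) {
            return lookup(key);
        }
    }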