forked from I2P_Developers/i2p.i2p
Compare commits
6 Commits
i2p.i2p.2. ... i2p.i2p.2.
Author | SHA1 | Date
---|---|---
 | d35b4e0f1e |
 | 36d94733e2 |
 | 4b40314a62 |
 | 16d4d0625c |
 | b3d0b91db0 |
 | 336a01752b |
```diff
@@ -215,38 +215,14 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade
         // of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
         // perhaps statistically adjust this so we are the source every 1/N times... or something.
         if (floodfillEnabled() && (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
-            //if (!chanceOfFloodingOurOwn(-1)) {
-                flood(ds);
-                if (onSuccess != null)
-                    _context.jobQueue().addJob(onSuccess);
-            //} else {
-            //    _context.jobQueue().addJob(new FloodfillStoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
-            //} Less sure I should do this this time around. TODO: figure out how this should adjust
+            flood(ds);
+            if (onSuccess != null)
+                _context.jobQueue().addJob(onSuccess);
         } else {
             _context.jobQueue().addJob(new FloodfillStoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
         }
     }
 
-    /* TODO: figure out how this should work
-    private boolean chanceOfFloodingOurOwn(int percent) {
-        if (percent < 0) {
-            // make percent equal to 1 - peer failed-lookup rate by retrieving it from the stats
-            RateStat percentRate = _context.statManager().getRate("netDb.failedLookupRate");
-            if (percentRate != null)
-                percent = (1 - (int) percentRate.getLifetimeAverageValue()) * 100;
-            else {
-                _log.warn("chanceOfFloodingOurOwn() could not find netDb.failedLookupRate");
-                return false;
-            }
-        }
-        // if the router has been up for at least an hour
-        if (_context.router().getUptime() > 60*60*1000) {
-            // then return true 'percent' percent of the time
-            return Math.random() < (percent / 100.0f);
-        }
-        return false;
-    }*/
-
     /**
      * Increments and tests.
      * @since 0.7.11
```
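The deleted `chanceOfFloodingOurOwn()` sketch also carried an arithmetic bug: casting the fractional lifetime average to `int` before the subtraction truncates it to 0, so `percent` always came out as 100 and the gate never took effect. Below is a minimal standalone sketch of the probabilistic gate the comments describe, with the cast applied after the arithmetic; the class and method names are hypothetical, and only the stat semantics and the one-hour uptime guard come from the deleted block:

```java
import java.util.Random;

public class FloodGateSketch {
    private static final Random RANDOM = new Random();
    private static final long ONE_HOUR_MS = 60 * 60 * 1000L;

    /**
     * @param failedLookupRate lifetime average of netDb.failedLookupRate, in [0, 1]
     * @param uptimeMs         how long the router has been up
     * @return true if this router should flood its own RouterInfo itself
     */
    static boolean shouldFloodOurOwn(double failedLookupRate, long uptimeMs) {
        if (uptimeMs < ONE_HOUR_MS)
            return false; // too young; hand the store to a floodfill peer instead
        // Compute the percentage *before* truncating to int. The deleted code
        // cast the fractional average to int first, which truncates it to 0
        // and pins the result at 100.
        int percent = (int) ((1.0 - failedLookupRate) * 100);
        return RANDOM.nextInt(100) < percent;
    }

    public static void main(String[] args) {
        // e.g. a 5% failed-lookup rate => flood our own RI ~95% of the time
        System.out.println(shouldFloodOurOwn(0.05, 2 * ONE_HOUR_MS));
    }
}
```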
```diff
@@ -514,14 +490,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade
      * @return null always
      * @since 0.9.10
      */
-    // ToDo: With respect to segmented netDb clients, this framework needs
-    // refinement. A client with a segmented netDb cannot use exploratory
-    // tunnels. The return messages will not have sufficient information
-    // to be directed back to the client making the query.
     SearchJob search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease,
                      Hash fromLocalDest) {
         //if (true) return super.search(key, onFindJob, onFailedLookupJob, timeoutMs, isLease);
         if (key == null) throw new IllegalArgumentException("searchin for nothin, eh?");
+        if (fromLocalDest == null && isClientDb()) throw new IllegalArgumentException("client subDbs cannot use exploratory tunnels");
         boolean isNew = false;
         FloodSearchJob searchJob;
         synchronized (_activeFloodQueries) {
```
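The added check turns the removed ToDo note into a fail-fast guard: a lookup against a client subDb made without a client destination could never receive its reply, so it is rejected immediately instead of timing out later. A minimal sketch of the same pattern, with hypothetical types:

```java
public class SearchGuardSketch {
    private final boolean isClientDb;

    SearchGuardSketch(boolean isClientDb) {
        this.isClientDb = isClientDb;
    }

    void search(String key, String fromLocalDest) {
        if (key == null)
            throw new IllegalArgumentException("searchin for nothin, eh?");
        // A client subDb owns no exploratory tunnels, so a reply to a lookup
        // made without a client destination could never be routed back.
        if (fromLocalDest == null && isClientDb)
            throw new IllegalArgumentException("client subDbs cannot use exploratory tunnels");
        // ... a real implementation would start the flood search here ...
    }

    public static void main(String[] args) {
        SearchGuardSketch clientDb = new SearchGuardSketch(true);
        try {
            clientDb.search("someKey", null);
        } catch (IllegalArgumentException expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}
```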
```diff
@@ -41,7 +41,7 @@ class RefreshRoutersJob extends JobImpl {
      * Don't go faster as this overloads the expl. OBEP / IBGW
      */
     private final static long RERUN_DELAY_MS = 2500;
-    public final static long EXPIRE = 2*60*60*1000;
+    private final static long EXPIRE = 2*60*60*1000;
     private final static long NEW_LOOP_DELAY = 37*60*1000;
     private static final int ENOUGH_FFS = 3 * StartExplorersJob.LOW_FFS;
 
```
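The context comment explains the floor on RERUN_DELAY_MS: refreshing faster than once per 2.5 seconds overloads the exploratory outbound endpoint / inbound gateway. A minimal sketch, using a plain ScheduledExecutorService rather than i2p's job queue, of the requeue pattern these constants drive; ENOUGH_FFS gets a stand-in value since StartExplorersJob.LOW_FFS is not shown here:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class RefreshLoopSketch {
    private static final long RERUN_DELAY_MS = 2500;               // floor; faster overloads the expl. OBEP/IBGW
    private static final long NEW_LOOP_DELAY_MS = 37 * 60 * 1000L; // pause between full passes
    private static final int ENOUGH_FFS = 9;                       // stand-in for 3 * StartExplorersJob.LOW_FFS

    private final ScheduledExecutorService exec =
            Executors.newSingleThreadScheduledExecutor();
    private int knownFloodfills = 0;

    void runJob() {
        knownFloodfills++; // stand-in for "refresh one router, maybe learn a floodfill"
        // Once enough floodfills are known, back off to a full loop delay;
        // otherwise requeue quickly for the next router.
        long delay = (knownFloodfills >= ENOUGH_FFS) ? NEW_LOOP_DELAY_MS : RERUN_DELAY_MS;
        exec.schedule(this::runJob, delay, TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) {
        new RefreshLoopSketch().runJob();
    }
}
```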
```diff
@@ -147,10 +147,6 @@ class CapacityCalculator {
                 capacity -= PENALTY_CAP_E;
             }
         }
-        /* TODO: G caps can be excluded in TunnelPeerSelector by adding it to DEFAULT_EXCLUDE_CAPS */
-        // decide what other handling if any is needed here.
-        //else if (caps.indexOf(Router.CAPABILITY_NO_TUNNELS) >= 0)
-        //    capacity -= PENALTY_G_CAP;
         } else {
             capacity -= PENALTY_NO_RI;
         }
```
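The removed lines weigh an alternative: penalize G-cap ("rejecting all tunnels") routers in the capacity score here, versus excluding them in TunnelPeerSelector via DEFAULT_EXCLUDE_CAPS. A standalone sketch of the penalty approach; the penalty values are illustrative, and only the cap letters and constant names echo the diff:

```java
public class CapacityPenaltySketch {
    // 'G' is Router.CAPABILITY_NO_TUNNELS in the real code; these penalty
    // values are illustrative, not the calculator's actual constants.
    static final char CAPABILITY_NO_TUNNELS = 'G';
    static final double PENALTY_CAP_E = 2.0;
    static final double PENALTY_G_CAP = 3.0;

    static double applyCapPenalties(double capacity, String caps) {
        // E cap: high congestion advertised by the peer.
        if (caps.indexOf('E') >= 0)
            capacity -= PENALTY_CAP_E;
        // The alternative the removed comment mentions: instead of a penalty
        // here, G-cap routers could be excluded outright by adding the cap
        // to TunnelPeerSelector's DEFAULT_EXCLUDE_CAPS.
        if (caps.indexOf(CAPABILITY_NO_TUNNELS) >= 0)
            capacity -= PENALTY_G_CAP;
        return capacity;
    }

    public static void main(String[] args) {
        System.out.println(applyCapPenalties(10.0, "XG")); // 7.0
    }
}
```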
```diff
@@ -238,13 +238,6 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
         // Client tunnel messages need explicit handling
         // in the context of the client subDb.
         if (_client != null) {
-            //Hash dbid = _context.netDbSegmentor().getDbidByHash(_client);
-            /*if (dbid == null) {
-                // This error shouldn't occur. All clients should have their own netDb.
-                if (_log.shouldLog(Log.ERROR))
-                    _log.error("Error, client (" + _clientNickname + ") dbid not found while processing messages in the IBMD.");
-                return;
-            }*/
             // For now, the only client message we know how to handle here is a DSM.
             // There aren't normally DSM messages here, but it should be safe to store
             // them in the client netDb.
```
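A minimal sketch, with hypothetical types and a stand-in message-type constant, of the dispatch rule the surviving comments describe: in a client tunnel context, only a DatabaseStoreMessage (DSM) is handled, and it is stored in that client's own subDb rather than the router's main netDb:

```java
public class ClientCloveDispatchSketch {
    interface NetDb { void store(String key, byte[] entry); }

    static final int TYPE_DATABASE_STORE = 1; // stand-in for DatabaseStoreMessage.MESSAGE_TYPE

    static void handleClientMessage(int messageType, String key, byte[] entry,
                                    NetDb clientNetDb) {
        if (messageType == TYPE_DATABASE_STORE) {
            // Safe to keep: the entry only becomes visible through this
            // client's subDb, never the router's main netDb.
            clientNetDb.store(key, entry);
        }
        // Any other message type would need its own client-context handling
        // and is dropped here.
    }

    public static void main(String[] args) {
        handleClientMessage(TYPE_DATABASE_STORE, "riKey", new byte[0],
                            (k, e) -> System.out.println("stored " + k));
    }
}
```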