forked from I2P_Developers/i2p.i2p
* Netdb exploration:
  - Remove floodfills from the dont-include list in the exploration
    DatabaseLookupMessage, as the dont-include-floodfills flag has been
    supported since release 0.7.9. This makes exploration work better,
    since there is now room for non-floodfills in the dont-include list.
  - Reduce the min and max exploration intervals
  - Explore aggressively at startup and when the number of known routers is low
  - Explore slowly when the number of known routers is high
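Taken together, these changes make the exploration cadence adaptive to router uptime and to how much of the netDb is already known. The following is a condensed sketch of the resulting schedule, with constants copied from the StartExplorersJob diff below; the class wrapper and method parameters are illustrative stand-ins for the router and facade calls in the patch, not code from the commit:

    // Condensed sketch of the delay policy introduced in
    // StartExplorersJob.getNextRunDelay(); constants are from the diff below.
    class ExploreSchedule {
        static final int MIN_RERUN_DELAY_MS = 99*1000;     // was 5*60*1000
        static final int MAX_RERUN_DELAY_MS = 15*60*1000;  // was 30*60*1000
        static final int STARTUP_TIME = 60*60*1000;        // KNDF expiration grace period
        static final int MIN_ROUTERS = 250;
        static final int MAX_ROUTERS = 800;

        // Parameters stand in for the router/facade lookups made in the patch.
        long nextRunDelay(boolean floodfill, long uptimeMs, int knownEntries,
                          long msSinceLastNewKey) {
            if (floodfill)                      // floodfills don't explore
                return MAX_RERUN_DELAY_MS;
            if (uptimeMs < STARTUP_TIME || knownEntries < MIN_ROUTERS)
                return MIN_RERUN_DELAY_MS;      // just started or sparse view: fast
            if (knownEntries > MAX_ROUTERS)
                return MAX_RERUN_DELAY_MS;      // well-populated netDb: slow
            // Otherwise wait about as long as it's been since we last learned
            // a new key, clamped to [MIN, MAX]; the upper half of the clamp
            // falls outside the hunk shown below.
            return Math.max(MIN_RERUN_DELAY_MS,
                            Math.min(MAX_RERUN_DELAY_MS, msSinceLastNewKey));
        }
    }

In addition, runJob() doubles the number of keys explored per run when fewer than LOW_ROUTERS (125) entries are known.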
DataStore.java

@@ -34,4 +34,9 @@ public interface DataStore {
     public void rescan();
     public int countLeaseSets();
 
+    /**
+     *  @return total size (RI and LS)
+     *  @since 0.8.8
+     */
+    public int size();
 }
ExploreJob.java

@@ -98,12 +98,13 @@ class ExploreJob extends SearchJob {
         // in a few releases, we can (and should) remove this,
         // as routers will honor the above flag, and we want the table to include
         // only non-floodfills.
-        if (available > 0 && ks != null) {
-            List peers = _peerSelector.selectFloodfillParticipants(rkey, available, ks);
-            int len = peers.size();
-            if (len > 0)
-                msg.getDontIncludePeers().addAll(peers);
-        }
+        // Removed in 0.8.8, good thing, as we had well over MAX_CLOSEST floodfills.
+        //if (available > 0 && ks != null) {
+        //    List peers = _peerSelector.selectFloodfillParticipants(rkey, available, ks);
+        //    int len = peers.size();
+        //    if (len > 0)
+        //        msg.getDontIncludePeers().addAll(peers);
+        //}
 
         available = MAX_CLOSEST - msg.getDontIncludePeers().size();
         if (available > 0) {
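For context on the hunk above: the "above flag" in the comment is set earlier in the same method, outside the lines shown. A minimal sketch of the mechanism, under the assumption (not confirmed by this diff) that the 0.7.9+ signal is an all-zero Hash.FAKE_HASH entry seeded into the dont-include list:

    // Sketch only; the actual flag-setting line sits above this hunk.
    // Assumption: an all-zero hash entry (Hash.FAKE_HASH) tells 0.7.9+
    // routers to omit floodfills from the lookup reply.
    msg.getDontIncludePeers().add(Hash.FAKE_HASH);
    // With floodfills excluded in-band, all MAX_CLOSEST dont-include slots
    // stay available for known non-floodfills, which the code below the
    // commented-out block now fills.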
StartExplorersJob.java

@@ -9,7 +9,6 @@ package net.i2p.router.networkdb.kademlia;
  */
 
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.Set;
 
 import net.i2p.data.Hash;
@@ -24,15 +23,23 @@ import net.i2p.util.Log;
  *
  */
 class StartExplorersJob extends JobImpl {
-    private Log _log;
-    private KademliaNetworkDatabaseFacade _facade;
+    private final Log _log;
+    private final KademliaNetworkDatabaseFacade _facade;
 
     /** don't explore more than 1 bucket at a time */
     private static final int MAX_PER_RUN = 1;
     /** dont explore the network more often than this */
-    private static final int MIN_RERUN_DELAY_MS = 5*60*1000;
-    /** explore the network at least once every thirty minutes */
-    private static final int MAX_RERUN_DELAY_MS = 30*60*1000;
+    private static final int MIN_RERUN_DELAY_MS = 99*1000;
+    /** explore the network at least this often */
+    private static final int MAX_RERUN_DELAY_MS = 15*60*1000;
+    /** aggressively explore during this time - same as KNDF expiration grace period */
+    private static final int STARTUP_TIME = 60*60*1000;
+    /** super-aggressively explore if we have less than this many routers */
+    private static final int LOW_ROUTERS = 125;
+    /** aggressively explore if we have less than this many routers */
+    private static final int MIN_ROUTERS = 250;
+    /** explore slowly if we have more than this many routers */
+    private static final int MAX_ROUTERS = 800;
 
     public StartExplorersJob(RouterContext context, KademliaNetworkDatabaseFacade facade) {
         super(context);
@@ -44,12 +51,14 @@ class StartExplorersJob extends JobImpl {
     public void runJob() {
         if (! (((FloodfillNetworkDatabaseFacade)_facade).floodfillEnabled() ||
                getContext().router().gracefulShutdownInProgress())) {
-            Set toExplore = selectKeysToExplore();
+            int num = MAX_PER_RUN;
+            if (_facade.getDataStore().size() < LOW_ROUTERS)
+                num *= 2;
+            Set<Hash> toExplore = selectKeysToExplore(num);
             if (_log.shouldLog(Log.DEBUG))
                 _log.debug("Keys to explore during this run: " + toExplore);
             _facade.removeFromExploreKeys(toExplore);
-            for (Iterator iter = toExplore.iterator(); iter.hasNext(); ) {
-                Hash key = (Hash)iter.next();
+            for (Hash key : toExplore) {
                 getContext().jobQueue().addJob(new ExploreJob(getContext(), _facade, key));
             }
         }
@@ -70,8 +79,24 @@ class StartExplorersJob extends JobImpl {
         getTiming().setStartAfter(getContext().clock().now() + delay);
     }
 
-    /** how long should we wait before exploring? */
+    /**
+     *  How long should we wait before exploring?
+     *  We wait as long as it's been since we were last successful,
+     *  with exceptions.
+     */
     private long getNextRunDelay() {
+        // we don't explore if floodfill
+        if (((FloodfillNetworkDatabaseFacade)_facade).floodfillEnabled())
+            return MAX_RERUN_DELAY_MS;
+
+        // If we don't know too many peers, or just started, explore aggressively
+        // Use DataStore.size() which includes leasesets because it's faster
+        if (getContext().router().getUptime() < STARTUP_TIME ||
+            _facade.getDataStore().size() < MIN_ROUTERS)
+            return MIN_RERUN_DELAY_MS;
+        if (_facade.getDataStore().size() > MAX_ROUTERS)
+            return MAX_RERUN_DELAY_MS;
+
         long delay = getContext().clock().now() - _facade.getLastExploreNewDate();
         if (delay < MIN_RERUN_DELAY_MS)
             return MIN_RERUN_DELAY_MS;
@@ -87,16 +112,16 @@
      * Nope, ExploreKeySelectorJob is disabled, so the explore pool
      * may be empty. In that case, generate random keys.
      */
-    private Set selectKeysToExplore() {
-        Set queued = _facade.getExploreKeys();
+    private Set<Hash> selectKeysToExplore(int num) {
+        Set<Hash> queued = _facade.getExploreKeys();
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Keys waiting for exploration: " + queued.size());
-        Set rv = new HashSet(MAX_PER_RUN);
-        for (Iterator iter = queued.iterator(); iter.hasNext(); ) {
-            if (rv.size() >= MAX_PER_RUN) break;
-            rv.add(iter.next());
+        Set<Hash> rv = new HashSet(num);
+        for (Hash key : queued) {
+            if (rv.size() >= num) break;
+            rv.add(key);
         }
-        for (int i = rv.size(); i < MAX_PER_RUN; i++) {
+        for (int i = rv.size(); i < num; i++) {
             byte hash[] = new byte[Hash.HASH_LENGTH];
             getContext().random().nextBytes(hash);
             Hash key = new Hash(hash);
|
TransientDataStore.java

@@ -48,6 +48,14 @@ class TransientDataStore implements DataStore {
 
     public void rescan() {}
 
+    /**
+     *  @return total size (RI and LS)
+     *  @since 0.8.8
+     */
+    public int size() {
+        return _data.size();
+    }
+
     public Set<Hash> getKeys() {
         return new HashSet(_data.keySet());
     }