diff --git a/apps/routerconsole/jsp/debug.jsp b/apps/routerconsole/jsp/debug.jsp
index 90bd964eb..93b9b58c0 100644
--- a/apps/routerconsole/jsp/debug.jsp
+++ b/apps/routerconsole/jsp/debug.jsp
@@ -21,6 +21,12 @@
*/
net.i2p.router.RouterContext ctx = (net.i2p.router.RouterContext) net.i2p.I2PAppContext.getGlobalContext();
+ /*
+ * Print out the status for the NetDB
+ */
+ out.print("<h2>Router DHT</h2>");
+ ctx.netDb().renderStatusHTML(out);
+
/*
* Print out the status for the UpdateManager
*/
diff --git a/build.xml b/build.xml
index 0902f3f08..81cfe97c8 100644
--- a/build.xml
+++ b/build.xml
@@ -476,7 +476,7 @@
-
+
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucket.java b/core/java/src/net/i2p/kademlia/KBucket.java
similarity index 97%
rename from apps/i2psnark/java/src/net/i2p/kademlia/KBucket.java
rename to core/java/src/net/i2p/kademlia/KBucket.java
index 8c3b85f23..0e86df38b 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/KBucket.java
+++ b/core/java/src/net/i2p/kademlia/KBucket.java
@@ -17,7 +17,7 @@ import net.i2p.data.SimpleDataStructure;
* a local key, using XOR as the distance metric
*
* Refactored from net.i2p.router.networkdb.kademlia
- * @since 0.9.2
+ * @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public interface KBucket<T extends SimpleDataStructure> {
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketImpl.java b/core/java/src/net/i2p/kademlia/KBucketImpl.java
similarity index 98%
rename from apps/i2psnark/java/src/net/i2p/kademlia/KBucketImpl.java
rename to core/java/src/net/i2p/kademlia/KBucketImpl.java
index e3c72c092..ac804b90a 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketImpl.java
+++ b/core/java/src/net/i2p/kademlia/KBucketImpl.java
@@ -41,7 +41,7 @@ import net.i2p.util.ConcurrentHashSet;
* removing entries, this KBucket will exceed the max size.
*
* Refactored from net.i2p.router.networkdb.kademlia
- * @since 0.9.2
+ * @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
class KBucketImpl<T extends SimpleDataStructure> implements KBucket<T> {
/**
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketSet.java b/core/java/src/net/i2p/kademlia/KBucketSet.java
similarity index 99%
rename from apps/i2psnark/java/src/net/i2p/kademlia/KBucketSet.java
rename to core/java/src/net/i2p/kademlia/KBucketSet.java
index 542e30026..1b2ba756a 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketSet.java
+++ b/core/java/src/net/i2p/kademlia/KBucketSet.java
@@ -35,7 +35,7 @@ import net.i2p.util.Log;
* times 2**(B-1) for Kademlia value B.
*
* Refactored from net.i2p.router.networkdb.kademlia
- * @since 0.9.2
+ * @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public class KBucketSet<T extends SimpleDataStructure> {
private final Log _log;
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketTrimmer.java b/core/java/src/net/i2p/kademlia/KBucketTrimmer.java
similarity index 91%
rename from apps/i2psnark/java/src/net/i2p/kademlia/KBucketTrimmer.java
rename to core/java/src/net/i2p/kademlia/KBucketTrimmer.java
index b33f85ddb..fb73737ea 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketTrimmer.java
+++ b/core/java/src/net/i2p/kademlia/KBucketTrimmer.java
@@ -4,7 +4,7 @@ import net.i2p.data.SimpleDataStructure;
/**
* Called when a kbucket can no longer be split and is too big
- * @since 0.9.2
+ * @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public interface KBucketTrimmer<T extends SimpleDataStructure> {
/**
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/RandomIfOldTrimmer.java b/core/java/src/net/i2p/kademlia/RandomIfOldTrimmer.java
similarity index 91%
rename from apps/i2psnark/java/src/net/i2p/kademlia/RandomIfOldTrimmer.java
rename to core/java/src/net/i2p/kademlia/RandomIfOldTrimmer.java
index ade28ce50..dc50c8a22 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/RandomIfOldTrimmer.java
+++ b/core/java/src/net/i2p/kademlia/RandomIfOldTrimmer.java
@@ -5,7 +5,7 @@ import net.i2p.data.SimpleDataStructure;
/**
* Removes a random element, but only if the bucket hasn't changed in 5 minutes.
- * @since 0.9.2
+ * @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public class RandomIfOldTrimmer<T extends SimpleDataStructure> extends RandomTrimmer<T> {
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/RandomTrimmer.java b/core/java/src/net/i2p/kademlia/RandomTrimmer.java
similarity index 93%
rename from apps/i2psnark/java/src/net/i2p/kademlia/RandomTrimmer.java
rename to core/java/src/net/i2p/kademlia/RandomTrimmer.java
index c1efff262..72578ba1c 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/RandomTrimmer.java
+++ b/core/java/src/net/i2p/kademlia/RandomTrimmer.java
@@ -8,7 +8,7 @@ import net.i2p.data.SimpleDataStructure;
/**
* Removes a random element. Not resistant to flooding.
- * @since 0.9.2
+ * @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public class RandomTrimmer<T extends SimpleDataStructure> implements KBucketTrimmer<T> {
protected final I2PAppContext _ctx;
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/RejectTrimmer.java b/core/java/src/net/i2p/kademlia/RejectTrimmer.java
similarity index 85%
rename from apps/i2psnark/java/src/net/i2p/kademlia/RejectTrimmer.java
rename to core/java/src/net/i2p/kademlia/RejectTrimmer.java
index 2e29f28e2..5704541ff 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/RejectTrimmer.java
+++ b/core/java/src/net/i2p/kademlia/RejectTrimmer.java
@@ -4,7 +4,7 @@ import net.i2p.data.SimpleDataStructure;
/**
* Removes nothing and always rejects the add. Flood resistant..
- * @since 0.9.2
+ * @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public class RejectTrimmer<T extends SimpleDataStructure> implements KBucketTrimmer<T> {
    public boolean trim(KBucket<T> kbucket, T toAdd) {
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/SelectionCollector.java b/core/java/src/net/i2p/kademlia/SelectionCollector.java
similarity index 80%
rename from apps/i2psnark/java/src/net/i2p/kademlia/SelectionCollector.java
rename to core/java/src/net/i2p/kademlia/SelectionCollector.java
index e4cb770de..06a6f0957 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/SelectionCollector.java
+++ b/core/java/src/net/i2p/kademlia/SelectionCollector.java
@@ -4,7 +4,7 @@ import net.i2p.data.SimpleDataStructure;
/**
* Visit kbuckets, gathering matches
- * @since 0.9.2
+ * @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public interface SelectionCollector<T extends SimpleDataStructure> {
    public void add(T entry);
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/XORComparator.java b/core/java/src/net/i2p/kademlia/XORComparator.java
similarity index 88%
rename from apps/i2psnark/java/src/net/i2p/kademlia/XORComparator.java
rename to core/java/src/net/i2p/kademlia/XORComparator.java
index 2ac5017dd..5763a7b4f 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/XORComparator.java
+++ b/core/java/src/net/i2p/kademlia/XORComparator.java
@@ -7,9 +7,9 @@ import net.i2p.data.SimpleDataStructure;
/**
* Help sort Hashes in relation to a base key using the XOR metric
*
- * @since 0.9.2
+ * @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
-class XORComparator<T extends SimpleDataStructure> implements Comparator<T> {
+public class XORComparator<T extends SimpleDataStructure> implements Comparator<T> {
private final byte[] _base;
/**
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/package.html b/core/java/src/net/i2p/kademlia/package.html
similarity index 60%
rename from apps/i2psnark/java/src/net/i2p/kademlia/package.html
rename to core/java/src/net/i2p/kademlia/package.html
index fe1a24f43..f517b242f 100644
--- a/apps/i2psnark/java/src/net/i2p/kademlia/package.html
+++ b/core/java/src/net/i2p/kademlia/package.html
@@ -1,6 +1,6 @@
This is a major rewrite of KBucket, KBucketSet, and KBucketImpl from net.i2p.router.networkdb.kademlia.
The classes are now generic to support SHA1, SHA256, or other key lengths.
-The long-term goal is to prove out this new implementation in i2psnark,
-then move it to core, then convert the network database to use it.
+Packaged in i2psnark since 0.9.2, and moved to core in 0.9.10
+so the network database can use it.
diff --git a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java
index be0ce340e..a488ff7fc 100644
--- a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java
+++ b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java
@@ -74,7 +74,8 @@ public abstract class NetworkDatabaseFacade implements Service {
public int getKnownLeaseSets() { return 0; }
public boolean isInitialized() { return true; }
public void rescan() {}
- /** @deprecated moved to router console */
+
+ /** Debug only - all user info moved to NetDbRenderer in router console */
public void renderStatusHTML(Writer out) throws IOException {}
/** public for NetDbRenderer in routerconsole */
public Set getLeases() { return Collections.emptySet(); }
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java
index 05154420b..88122c0e2 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreJob.java
@@ -15,6 +15,7 @@ import java.util.Set;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseLookupMessage;
+import net.i2p.kademlia.KBucketSet;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
@@ -97,7 +98,7 @@ class ExploreJob extends SearchJob {
available--;
}
- KBucketSet ks = _facade.getKBuckets();
+ KBucketSet<Hash> ks = _facade.getKBuckets();
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(getState().getTarget());
// in a few releases, we can (and should) remove this,
// as routers will honor the above flag, and we want the table to include
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java
index cb4f111f6..d1f489d6a 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/ExploreKeySelectorJob.java
@@ -8,11 +8,14 @@ package net.i2p.router.networkdb.kademlia;
*
*/
+import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import net.i2p.data.Hash;
+import net.i2p.kademlia.KBucket;
+import net.i2p.kademlia.KBucketSet;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
@@ -29,6 +32,7 @@ class ExploreKeySelectorJob extends JobImpl {
private KademliaNetworkDatabaseFacade _facade;
private final static long RERUN_DELAY_MS = 60*1000;
+ private final static long OLD_BUCKET_TIME = 15*60*1000;
public ExploreKeySelectorJob(RouterContext context, KademliaNetworkDatabaseFacade facade) {
super(context);
@@ -42,7 +46,7 @@ class ExploreKeySelectorJob extends JobImpl {
requeue(30*RERUN_DELAY_MS);
return;
}
- Set<Hash> toExplore = selectKeysToExplore();
+ Collection<Hash> toExplore = selectKeysToExplore();
_log.info("Filling the explorer pool with: " + toExplore);
if (toExplore != null)
_facade.queueForExploration(toExplore);
@@ -54,33 +58,11 @@ class ExploreKeySelectorJob extends JobImpl {
* for it, with a maximum number of keys limited by the exploration pool size
*
*/
- private Set<Hash> selectKeysToExplore() {
+ private Collection<Hash> selectKeysToExplore() {
Set<Hash> alreadyQueued = _facade.getExploreKeys();
- if (alreadyQueued.size() > KBucketSet.NUM_BUCKETS) return null;
- Set toExplore = new HashSet(KBucketSet.NUM_BUCKETS - alreadyQueued.size());
- for (int i = 0; i < KBucketSet.NUM_BUCKETS; i++) {
- KBucket bucket = _facade.getKBuckets().getBucket(i);
- if (bucket.getKeyCount() < KBucketSet.BUCKET_SIZE) {
- boolean already = false;
- for (Iterator iter = alreadyQueued.iterator(); iter.hasNext(); ) {
- Hash key = iter.next();
- if (bucket.shouldContain(key)) {
- already = true;
- _log.debug("Bucket " + i + " is already queued for exploration \t" + key);
- break;
- }
- }
- if (!already) {
- // no keys are queued for exploring this still-too-small bucket yet
- Hash key = bucket.generateRandomKey();
- _log.debug("Bucket " + i + " is NOT queued for exploration, and it only has " + bucket.getKeyCount() + " keys, so explore with \t" + key);
- toExplore.add(key);
- }
- } else {
- _log.debug("Bucket " + i + " already has enough keys (" + bucket.getKeyCount() + "), no need to explore further");
- }
- }
- return toExplore;
+ if (alreadyQueued.size() > KademliaNetworkDatabaseFacade.MAX_EXPLORE_QUEUE)
+ return null;
+ return _facade.getKBuckets().getExploreKeys(OLD_BUCKET_TIME);
}
}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java
index c07f3de9b..932748906 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java
@@ -8,6 +8,7 @@ import java.util.List;
import net.i2p.data.Hash;
import net.i2p.data.i2np.DatabaseLookupMessage;
+import net.i2p.kademlia.KBucketSet;
import net.i2p.router.Job;
import net.i2p.router.MessageSelector;
import net.i2p.router.OutNetMessage;
@@ -70,7 +71,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
//List floodfillPeers = _facade.getFloodfillPeers();
// new
List<Hash> floodfillPeers;
- KBucketSet ks = _facade.getKBuckets();
+ KBucketSet<Hash> ks = _facade.getKBuckets();
if (ks != null) {
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(_key);
// Ideally we would add the key to an exclude list, so we don't try to query a ff peer for itself,
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java
index 3e054ed0c..fdf47bf3c 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java
@@ -20,6 +20,9 @@ import java.util.TreeSet;
import net.i2p.data.Hash;
import net.i2p.data.RouterAddress;
import net.i2p.data.RouterInfo;
+import net.i2p.kademlia.KBucketSet;
+import net.i2p.kademlia.SelectionCollector;
+import net.i2p.kademlia.XORComparator;
import net.i2p.router.RouterContext;
import net.i2p.router.peermanager.PeerProfile;
import net.i2p.router.util.RandomIterator;
@@ -53,7 +56,7 @@ class FloodfillPeerSelector extends PeerSelector {
* @return List of Hash for the peers selected
*/
@Override
- List<Hash> selectMostReliablePeers(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
+ List<Hash> selectMostReliablePeers(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, true);
}
@@ -68,7 +71,7 @@ class FloodfillPeerSelector extends PeerSelector {
* @return List of Hash for the peers selected
*/
@Override
- List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
+ List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, false);
}
@@ -81,7 +84,7 @@ class FloodfillPeerSelector extends PeerSelector {
* @param peersToIgnore can be null
* @return List of Hash for the peers selected
*/
- List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets, boolean preferConnected) {
+ List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets, boolean preferConnected) {
if (peersToIgnore == null)
peersToIgnore = Collections.singleton(_context.routerHash());
else
@@ -104,7 +107,7 @@ class FloodfillPeerSelector extends PeerSelector {
* List will not include our own hash.
* List is not sorted and not shuffled.
*/
- List<Hash> selectFloodfillParticipants(KBucketSet kbuckets) {
+ List<Hash> selectFloodfillParticipants(KBucketSet<Hash> kbuckets) {
Set ignore = Collections.singleton(_context.routerHash());
return selectFloodfillParticipants(ignore, kbuckets);
}
@@ -116,7 +119,7 @@ class FloodfillPeerSelector extends PeerSelector {
* List MAY INCLUDE our own hash.
* List is not sorted and not shuffled.
*/
- private List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet kbuckets) {
+ private List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet<Hash> kbuckets) {
/*****
if (kbuckets == null) return Collections.EMPTY_LIST;
// TODO this is very slow - use profile getPeersByCapability('f') instead
@@ -155,7 +158,7 @@ class FloodfillPeerSelector extends PeerSelector {
* success newer than failure
* Group 3: All others
*/
- List<Hash> selectFloodfillParticipants(Hash key, int maxNumRouters, KBucketSet kbuckets) {
+ List<Hash> selectFloodfillParticipants(Hash key, int maxNumRouters, KBucketSet<Hash> kbuckets) {
Set ignore = Collections.singleton(_context.routerHash());
return selectFloodfillParticipants(key, maxNumRouters, ignore, kbuckets);
}
@@ -175,7 +178,7 @@ class FloodfillPeerSelector extends PeerSelector {
* @param toIgnore can be null
* @param kbuckets now unused
*/
- List<Hash> selectFloodfillParticipants(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) {
+ List<Hash> selectFloodfillParticipants(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet<Hash> kbuckets) {
if (toIgnore == null) {
toIgnore = Collections.singleton(_context.routerHash());
} else if (!toIgnore.contains(_context.routerHash())) {
@@ -193,9 +196,9 @@ class FloodfillPeerSelector extends PeerSelector {
* @param toIgnore can be null
* @param kbuckets now unused
*/
- private List<Hash> selectFloodfillParticipantsIncludingUs(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) {
+ private List<Hash> selectFloodfillParticipantsIncludingUs(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet<Hash> kbuckets) {
List ffs = selectFloodfillParticipants(toIgnore, kbuckets);
- TreeSet<Hash> sorted = new TreeSet<Hash>(new XORComparator(key));
+ TreeSet<Hash> sorted = new TreeSet<Hash>(new XORComparator<Hash>(key));
sorted.addAll(ffs);
List rv = new ArrayList(howMany);
@@ -339,7 +342,7 @@ class FloodfillPeerSelector extends PeerSelector {
return Integer.valueOf(rv);
}
- private class FloodfillSelectionCollector implements SelectionCollector {
+ private class FloodfillSelectionCollector implements SelectionCollector<Hash> {
private final TreeSet<Hash> _sorted;
private final List<Hash> _floodfillMatches;
private final Hash _key;
@@ -354,7 +357,7 @@ class FloodfillPeerSelector extends PeerSelector {
*/
public FloodfillSelectionCollector(Hash key, Set toIgnore, int wanted) {
_key = key;
- _sorted = new TreeSet<Hash>(new XORComparator(key));
+ _sorted = new TreeSet<Hash>(new XORComparator<Hash>(key));
_floodfillMatches = new ArrayList(8);
_toIgnore = toIgnore;
_wanted = wanted;
@@ -475,7 +478,7 @@ class FloodfillPeerSelector extends PeerSelector {
* @return List of Hash for the peers selected, ordered
*/
@Override
- List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
+ List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
if (peersToIgnore != null && peersToIgnore.contains(Hash.FAKE_HASH)) {
// return non-ff
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/IterativeSearchJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/IterativeSearchJob.java
index 868f0d0b8..5eee0109f 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/IterativeSearchJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/IterativeSearchJob.java
@@ -16,6 +16,8 @@ import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.I2NPMessage;
+import net.i2p.kademlia.KBucketSet;
+import net.i2p.kademlia.XORComparator;
import net.i2p.router.CommSystemFacade;
import net.i2p.router.Job;
import net.i2p.router.MessageSelector;
@@ -93,7 +95,7 @@ class IterativeSearchJob extends FloodSearchJob {
_timeoutMs = Math.min(timeoutMs, MAX_SEARCH_TIME);
_expiration = _timeoutMs + ctx.clock().now();
_rkey = ctx.routingKeyGenerator().getRoutingKey(key);
- _toTry = new TreeSet<Hash>(new XORComparator(_rkey));
+ _toTry = new TreeSet<Hash>(new XORComparator<Hash>(_rkey));
_unheardFrom = new HashSet(CONCURRENT_SEARCHES);
_failedPeers = new HashSet(TOTAL_SEARCH_LIMIT);
_sentTime = new ConcurrentHashMap(TOTAL_SEARCH_LIMIT);
@@ -109,7 +111,7 @@ class IterativeSearchJob extends FloodSearchJob {
}
// pick some floodfill peers and send out the searches
List<Hash> floodfillPeers;
- KBucketSet ks = _facade.getKBuckets();
+ KBucketSet<Hash> ks = _facade.getKBuckets();
if (ks != null) {
// Ideally we would add the key to an exclude list, so we don't try to query a ff peer for itself,
// but we're passing the rkey not the key, so we do it below instead in certain cases.
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KBucket.java b/router/java/src/net/i2p/router/networkdb/kademlia/KBucket.java
deleted file mode 100644
index b76b948ed..000000000
--- a/router/java/src/net/i2p/router/networkdb/kademlia/KBucket.java
+++ /dev/null
@@ -1,83 +0,0 @@
-package net.i2p.router.networkdb.kademlia;
-/*
- * free (adj.): unencumbered; not under the control of others
- * Written by jrandom in 2003 and released into the public domain
- * with no warranty of any kind, either expressed or implied.
- * It probably won't make your computer catch on fire, or eat
- * your children, but it might. Use at your own risk.
- *
- */
-
-import java.util.Set;
-
-import net.i2p.data.Hash;
-
-/**
- * Group, without inherent ordering, a set of keys a certain distance away from
- * a local key, using XOR as the distance metric
- *
- */
-interface KBucket {
- /**
- * lowest order high bit for difference keys
- */
- public int getRangeBegin();
- /**
- * highest high bit for the difference keys
- *
- */
- public int getRangeEnd();
- /**
- * Set the range low and high bits for difference keys
- */
- public void setRange(int lowOrderBitLimit, int highOrderBitLimit);
- /**
- * Number of keys already contained in this kbuckey
- */
- public int getKeyCount();
- /**
- * whether or not the key qualifies as part of this bucket
- *
- */
- public boolean shouldContain(Hash key);
- /**
- * Add the peer to the bucket
- *
- * @return number of keys in the bucket after the addition
- */
- public int add(Hash key);
- /**
- * Remove the key from the bucket
- * @return true if the key existed in the bucket before removing it, else false
- */
- public boolean remove(Hash key);
-
- /**
- * Retrieve all routing table entries stored in the bucket
- * @return set of Hash structures
- */
- public Set getEntries();
-
- /**
- * Retrieve hashes stored in the bucket, excluding the ones specified
- * @return set of Hash structures
- * @deprecated makes a copy, remove toIgnore in KBS instead
- */
- public Set getEntries(Set toIgnoreHashes);
-
- public void getEntries(SelectionCollector collector);
-
- /**
- * Fill the bucket with entries
- * @param entries set of Hash structures
- */
- public void setEntries(Set entries);
-
- /**
- * Generate a random key that would go inside this bucket
- *
- */
- public Hash generateRandomKey();
-
- public LocalHash getLocal();
-}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java b/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java
deleted file mode 100644
index 5b6189918..000000000
--- a/router/java/src/net/i2p/router/networkdb/kademlia/KBucketImpl.java
+++ /dev/null
@@ -1,474 +0,0 @@
-package net.i2p.router.networkdb.kademlia;
-/*
- * free (adj.): unencumbered; not under the control of others
- * Written by jrandom in 2003 and released into the public domain
- * with no warranty of any kind, either expressed or implied.
- * It probably won't make your computer catch on fire, or eat
- * your children, but it might. Use at your own risk.
- *
- */
-
-import java.math.BigInteger;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-
-import net.i2p.I2PAppContext;
-import net.i2p.data.DataHelper;
-import net.i2p.data.Hash;
-import net.i2p.util.ConcurrentHashSet;
-import net.i2p.util.Log;
-import net.i2p.util.RandomSource;
-
-class KBucketImpl implements KBucket {
- private Log _log;
- /**
- * set of Hash objects for the peers in the kbucketx
- *
- * jrandom switched from a HashSet to an ArrayList with this change:
- * 2005-08-27 jrandom
- * * Minor logging and optimization tweaks in the router and SDK
- *
- * Now we switch back to a ConcurrentHashSet and remove all the
- * synchronization, which may or may not be faster than
- * a synchronized ArrayList, with checks for existence before
- * adding a Hash. But the other benefit is it removes one
- * cause of profileMangager/netDb deadlock.
- */
- private final Set _entries;
- /** we center the kbucket set on the given hash, and derive distances from this */
- private LocalHash _local;
- /** include if any bits equal or higher to this bit (in big endian order) */
- private int _begin;
- /** include if no bits higher than this bit (inclusive) are set */
- private int _end;
- /** when did we last shake things up */
- private long _lastShuffle;
- private I2PAppContext _context;
-
- public KBucketImpl(I2PAppContext context, LocalHash local) {
- _context = context;
- _log = context.logManager().getLog(KBucketImpl.class);
- _entries = new ConcurrentHashSet(2); //all but the last 1 or 2 buckets will be empty
- _lastShuffle = context.clock().now();
- setLocal(local);
- }
-
- /** for testing - use above constructor for production to get common caching */
- public KBucketImpl(I2PAppContext context, Hash local) {
- this(context, new LocalHash(local));
- }
-
- public int getRangeBegin() { return _begin; }
- public int getRangeEnd() { return _end; }
- public void setRange(int lowOrderBitLimit, int highOrderBitLimit) {
- _begin = lowOrderBitLimit;
- _end = highOrderBitLimit;
- }
- public int getKeyCount() {
- return _entries.size();
- }
-
- public LocalHash getLocal() { return _local; }
- private void setLocal(LocalHash local) {
- _local = local;
- // we want to make sure we've got the cache in place before calling cachedXor
- _local.prepareCache();
- if (_log.shouldLog(Log.DEBUG))
- _log.debug("Local hash reset to " + DataHelper.toHexString(local.getData()));
- }
-
- private byte[] distanceFromLocal(Hash key) {
- if (key == null)
- throw new IllegalArgumentException("Null key for distanceFromLocal?");
- return _local.cachedXor(key);
- }
-
- public boolean shouldContain(Hash key) {
- byte distance[] = distanceFromLocal(key);
- // rather than use a BigInteger and compare, we do it manually by
- // checking the bits
- boolean tooLarge = distanceIsTooLarge(distance);
- if (tooLarge) {
- if (false && _log.shouldLog(Log.DEBUG))
- _log.debug("too large [" + _begin + "-->" + _end + "] "
- + "\nLow: " + BigInteger.ZERO.setBit(_begin).toString(16)
- + "\nCur: " + DataHelper.toHexString(distance)
- + "\nHigh: " + BigInteger.ZERO.setBit(_end).toString(16));
- return false;
- }
- boolean tooSmall = distanceIsTooSmall(distance);
- if (tooSmall) {
- if (_log.shouldLog(Log.DEBUG))
- _log.debug("too small [" + _begin + "-->" + _end + "] distance: " + DataHelper.toHexString(distance));
- return false;
- }
- // this bed is juuuuust right
- return true;
-
- /*
- // woohah, incredibly excessive object creation! whee!
- BigInteger kv = new BigInteger(1, distanceFromLocal(key));
- int lowComp = kv.compareTo(_lowerBounds);
- int highComp = kv.compareTo(_upperBounds);
-
- //_log.debug("kv.compareTo(low) = " + lowComp + " kv.compareTo(high) " + highComp);
-
- if ( (lowComp >= 0) && (highComp < 0) ) return true;
- return false;
- */
- }
-
- private final boolean distanceIsTooLarge(byte distance[]) {
- int upperLimitBit = Hash.HASH_LENGTH*8 - _end;
- // It is too large if there are any bits set before the upperLimitBit
- int upperLimitByte = upperLimitBit > 0 ? upperLimitBit / 8 : 0;
-
- if (upperLimitBit <= 0)
- return false;
-
- for (int i = 0; i < distance.length; i++) {
- if (i < upperLimitByte) {
- if (distance[i] != 0x00) {
- // outright too large
- return true;
- }
- } else if (i == upperLimitByte) {
- if (distance[i] == 0x00) {
- // no bits set through the high bit
- return false;
- } else {
- int upperVal = 1 << (upperLimitBit % 8);
- if (distance[i] > upperVal) {
- // still too large, but close
- return true;
- } else if (distance[i] == upperVal) {
- // ok, it *may* equal the upper limit,
- // if the rest of the bytes are 0
- for (int j = i+1; j < distance.length; j++) {
- if (distance[j] != 0x00) {
- // nope
- return true;
- }
- }
- // w00t, the rest is made of 0x00 bytes, so it
- // exactly matches the upper limit. kooky, very improbable,
- // but possible
- return false;
- }
- }
- } else if (i > upperLimitByte) {
- // no bits set before or at the upper limit, so its
- // definitely not too large
- return false;
- }
- }
- _log.log(Log.CRIT, "wtf, gravity broke: distance=" + DataHelper.toHexString(distance)
- + ", end=" + _end, new Exception("moo"));
- return true;
- }
-
- /**
- * Is the distance too small?
- *
- */
- private final boolean distanceIsTooSmall(byte distance[]) {
- int beginBit = Hash.HASH_LENGTH*8 - _begin;
- // It is too small if there are no bits set before the beginBit
- int beginByte = beginBit > 0 ? beginBit / 8 : 0;
-
- if (beginByte >= distance.length) {
- if (_begin == 0)
- return false;
- else
- return true;
- }
-
- for (int i = 0; i < distance.length; i++) {
- if ( (i < beginByte) && (distance[i] != 0x00) ) {
- return false;
- } else {
- if (i != beginByte) {
- // zero value and too early... keep going
- continue;
- } else {
- int beginVal = 1 << (_begin % 8);
- if (distance[i] >= beginVal) {
- return false;
- } else {
- // no bits set prior to the beginVal
- return true;
- }
- }
- }
- }
- _log.log(Log.CRIT, "wtf, gravity broke! distance=" + DataHelper.toHexString(distance)
- + " begin=" + _begin
- + " beginBit=" + beginBit
- + " beginByte=" + beginByte, new Exception("moo"));
- return true;
- }
-
- /**
- * @return unmodifiable view
- */
- public Set getEntries() {
- return Collections.unmodifiableSet(_entries);
- }
-
- /**
- * @deprecated makes a copy, remove toIgnore in KBS instead
- */
- public Set getEntries(Set toIgnoreHashes) {
- Set entries = new HashSet(_entries);
- entries.removeAll(toIgnoreHashes);
- return entries;
- }
-
- public void getEntries(SelectionCollector collector) {
- for (Hash h : _entries) {
- collector.add(h);
- }
- }
-
- public void setEntries(Set entries) {
- _entries.clear();
- _entries.addAll(entries);
- }
-
- /**
- * Todo: shuffling here is a hack and doesn't work since
- * we switched back to a HashSet implementation
- */
- public int add(Hash peer) {
- _entries.add(peer);
-/**********
- // Randomize the bucket every once in a while if we are floodfill, so that
- // exploration will return better results. See FloodfillPeerSelector.add(Hash).
- if (_lastShuffle + SHUFFLE_DELAY < _context.clock().now() &&
- !SearchJob.onlyQueryFloodfillPeers((RouterContext)_context)) {
- Collections.shuffle(_entries, _context.random());
- _lastShuffle = _context.clock().now();
- }
-***********/
- return _entries.size();
- }
-
- public boolean remove(Hash peer) {
- return _entries.remove(peer);
- }
-
- /**
- * Generate a random key to go within this bucket
- *
- * WARNING - Something is seriously broken here. testRand2() fails right away.
- * ExploreKeySelectorJob is now disabled, ExploreJob just searches for a random
- * key instead.
- */
- public Hash generateRandomKey() {
- BigInteger variance = new BigInteger((_end-_begin)-1, _context.random());
- variance = variance.setBit(_begin);
- //_log.debug("Random variance for " + _size + " bits: " + variance);
- byte data[] = variance.toByteArray();
- byte hash[] = new byte[Hash.HASH_LENGTH];
- if (data.length <= Hash.HASH_LENGTH) {
- System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
- } else {
- System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
- }
- Hash key = new Hash(hash);
- data = distanceFromLocal(key);
- hash = new byte[Hash.HASH_LENGTH];
- if (data.length <= Hash.HASH_LENGTH) {
- System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
- } else {
- System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
- }
- key = new Hash(hash);
- return key;
- }
-
- public Hash getRangeBeginKey() {
- BigInteger lowerBounds = getLowerBounds();
- if ( (_local != null) && (_local.getData() != null) ) {
- lowerBounds = lowerBounds.xor(new BigInteger(1, _local.getData()));
- }
-
- byte data[] = lowerBounds.toByteArray();
- byte hash[] = new byte[Hash.HASH_LENGTH];
- if (data.length <= Hash.HASH_LENGTH) {
- System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
- } else {
- System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
- }
- Hash key = new Hash(hash);
- return key;
- }
-
- public Hash getRangeEndKey() {
- BigInteger upperBounds = getUpperBounds();
- if ( (_local != null) && (_local.getData() != null) ) {
- upperBounds = upperBounds.xor(new BigInteger(1, _local.getData()));
- }
- byte data[] = upperBounds.toByteArray();
- byte hash[] = new byte[Hash.HASH_LENGTH];
- if (data.length <= Hash.HASH_LENGTH) {
- System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
- } else {
- System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
- }
- Hash key = new Hash(hash);
- return key;
- }
-
- private BigInteger getUpperBounds() {
- return BigInteger.ZERO.setBit(_end);
- }
- private BigInteger getLowerBounds() {
- if (_begin == 0)
- return BigInteger.ZERO;
- else
- return BigInteger.ZERO.setBit(_begin);
- }
-
- @Override
- public String toString() {
- StringBuilder buf = new StringBuilder(1024);
- buf.append("KBucketImpl: ");
- buf.append(_entries.toString()).append("\n");
- buf.append("Low bit: ").append(_begin).append(" high bit: ").append(_end).append('\n');
- buf.append("Local key: \n");
- if ( (_local != null) && (_local.getData() != null) )
- buf.append(toString(_local.getData())).append('\n');
- else
- buf.append("[undefined]\n");
- buf.append("Low and high keys:\n");
- buf.append(toString(getRangeBeginKey().getData())).append('\n');
- buf.append(toString(getRangeEndKey().getData())).append('\n');
- buf.append("Low and high deltas:\n");
- buf.append(getLowerBounds().toString(2)).append('\n');
- buf.append(getUpperBounds().toString(2)).append('\n');
- return buf.toString();
- }
-
- /**
- * Test harness to make sure its assigning keys to the right buckets
- *
- * WARNING - Something is seriously broken here. testRand2() fails right away.
- */
- public static void main(String args[]) {
- testRand2();
- testRand();
- testLimits();
-
- try { Thread.sleep(10000); } catch (InterruptedException ie) {}
- }
-
- private static void testLimits() {
- int low = 1;
- int high = 3;
- Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
- KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), Hash.FAKE_HASH);
- bucket.setRange(low, high);
- Hash lowerBoundKey = bucket.getRangeBeginKey();
- Hash upperBoundKey = bucket.getRangeEndKey();
- boolean okLow = bucket.shouldContain(lowerBoundKey);
- boolean okHigh = bucket.shouldContain(upperBoundKey);
- if (okLow && okHigh)
- log.debug("Limit test ok");
- else
- log.error("Limit test failed! ok low? " + okLow + " ok high? " + okHigh);
- }
-
- private static void testRand() {
- //StringBuilder buf = new StringBuilder(2048);
- int low = 1;
- int high = 3;
- Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
- LocalHash local = new LocalHash(Hash.FAKE_HASH);
- local.prepareCache();
- KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
- bucket.setRange(low, high);
- //Hash lowerBoundKey = bucket.getRangeBeginKey();
- //Hash upperBoundKey = bucket.getRangeEndKey();
- for (int i = 0; i < 100000; i++) {
- Hash rnd = bucket.generateRandomKey();
- //buf.append(toString(rnd.getData())).append('\n');
- boolean ok = bucket.shouldContain(rnd);
- if (!ok) {
- //byte diff[] = bucket.getLocal().cachedXor(rnd);
- //BigInteger dv = new BigInteger(1, diff);
- //log.error("WTF! bucket doesn't want: \n" + toString(rnd.getData())
- // + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2)
- // + "\nBucket: \n"+bucket, new Exception("WTF"));
- log.error("wtf, bucket doesnt want a key that it generated. i == " + i);
- log.error("\nLow: " + DataHelper.toHexString(bucket.getRangeBeginKey().getData())
- + "\nVal: " + DataHelper.toHexString(rnd.getData())
- + "\nHigh:" + DataHelper.toHexString(bucket.getRangeEndKey().getData()));
- try { Thread.sleep(1000); } catch (InterruptedException e) {}
- System.exit(0);
- } else {
- //_log.debug("Ok, bucket wants: \n" + toString(rnd.getData()));
- }
- //_log.info("Low/High:\n" + toString(lowBounds.toByteArray()) + "\n" + toString(highBounds.toByteArray()));
- }
- log.info("Passed 100,000 random key generations against the null hash");
- }
-
- private static void testRand2() {
- Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
- int low = 1;
- int high = 200;
- byte hash[] = new byte[Hash.HASH_LENGTH];
- RandomSource.getInstance().nextBytes(hash);
- LocalHash local = new LocalHash(hash);
- local.prepareCache();
- KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
- bucket.setRange(low, high);
- //Hash lowerBoundKey = bucket.getRangeBeginKey();
- //Hash upperBoundKey = bucket.getRangeEndKey();
- for (int i = 0; i < 100000; i++) {
- Hash rnd = bucket.generateRandomKey();
- //buf.append(toString(rnd.getData())).append('\n');
- boolean ok = bucket.shouldContain(rnd);
- if (!ok) {
- //byte diff[] = bucket.getLocal().cachedXor(rnd);
- //BigInteger dv = new BigInteger(1, diff);
- //log.error("WTF! bucket doesn't want: \n" + toString(rnd.getData())
- // + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2)
- // + "\nBucket: \n"+bucket, new Exception("WTF"));
- log.error("wtf, bucket doesnt want a key that it generated. i == " + i);
- log.error("\nLow: " + DataHelper.toHexString(bucket.getRangeBeginKey().getData())
- + "\nVal: " + DataHelper.toHexString(rnd.getData())
- + "\nHigh:" + DataHelper.toHexString(bucket.getRangeEndKey().getData()));
- try { Thread.sleep(1000); } catch (InterruptedException e) {}
- System.exit(0);
- } else {
- //_log.debug("Ok, bucket wants: \n" + toString(rnd.getData()));
- }
- }
- log.info("Passed 100,000 random key generations against a random hash");
- }
-
- private final static String toString(byte b[]) {
- if (true) return DataHelper.toHexString(b);
- StringBuilder buf = new StringBuilder(b.length);
- for (int i = 0; i < b.length; i++) {
- buf.append(toString(b[i]));
- buf.append(" ");
- }
- return buf.toString();
- }
-
- private final static String toString(byte b) {
- StringBuilder buf = new StringBuilder(8);
- for (int i = 7; i >= 0; i--) {
- boolean bb = (0 != (b & (1<= 0) {
- int oldSize = _buckets[bucket].getKeyCount();
- int numInBucket = _buckets[bucket].add(peer);
- if (numInBucket != oldSize)
- _size.incrementAndGet();
- if (numInBucket > BUCKET_SIZE) {
- // perhaps queue up coalesce job? naaahh.. lets let 'er grow for now
- }
- if (_log.shouldLog(Log.DEBUG))
- _log.debug("Peer " + peer + " added to bucket " + bucket);
- return oldSize != numInBucket;
- } else {
- throw new IllegalArgumentException("Unable to pick a bucket. wtf!");
- }
- }
-
- /**
- * Not an exact count (due to concurrency issues) but generally correct
- *
- */
- public int size() {
- return _size.get();
- /*
- int size = 0;
- for (int i = 0; i < _buckets.length; i++)
- size += _buckets[i].getKeyCount();
- return size;
- */
- }
-
- public boolean remove(Hash entry) {
- int bucket = pickBucket(entry);
- KBucket kbucket = getBucket(bucket);
- boolean removed = kbucket.remove(entry);
- if (removed)
- _size.decrementAndGet();
- return removed;
- }
-
- /** @since 0.8.8 */
- public void clear() {
- for (int i = 0; i < _buckets.length; i++) {
-            _buckets[i].setEntries(Collections.<Hash> emptySet());
- }
- _size.set(0);
- _us.clearXorCache();
- }
-
-    public Set<Hash> getAll() { return getAll(Collections.<Hash> emptySet()); };
-
-    public Set<Hash> getAll(Set<Hash> toIgnore) {
-        Set<Hash> all = new HashSet<Hash>(1024);
- for (int i = 0; i < _buckets.length; i++) {
- all.addAll(_buckets[i].getEntries());
- }
- all.removeAll(toIgnore);
- return all;
- }
-
- public void getAll(SelectionCollector collector) {
- long start = _context.clock().now();
- for (int i = 0; i < _buckets.length; i++)
- _buckets[i].getEntries(collector);
- _context.statManager().addRateData("netDb.KBSGetAllTime", _context.clock().now() - start, 0);
- }
-
- public int pickBucket(Hash key) {
- for (int i = 0; i < NUM_BUCKETS; i++) {
- if (_buckets[i].shouldContain(key))
- return i;
- }
- _log.error("Key does not fit in any bucket?! WTF!\nKey : ["
- + DataHelper.toHexString(key.getData()) + "]"
- + "\nUs : [" + toString(_us.getData()) + "]"
- + "\nDelta: ["
- + DataHelper.toHexString(DataHelper.xor(_us.getData(), key.getData()))
- + "]", new Exception("WTF"));
- displayBuckets();
- return -1;
- }
-
- public KBucket getBucket(int bucket) { return _buckets[bucket]; }
-
- protected KBucket[] createBuckets() {
- KBucket[] buckets = new KBucket[NUM_BUCKETS];
- for (int i = 0; i < NUM_BUCKETS-1; i++) {
- buckets[i] = createBucket(i*BASE, (i+1)*BASE);
- }
- buckets[NUM_BUCKETS-1] = createBucket(BASE*(NUM_BUCKETS-1), BASE*(NUM_BUCKETS) + 1);
- return buckets;
- }
-
- protected KBucket createBucket(int start, int end) {
- KBucket bucket = new KBucketImpl(_context, _us);
- bucket.setRange(start, end);
- _log.debug("Creating a bucket from " + start + " to " + (end));
- return bucket;
- }
-
- public void displayBuckets() {
- _log.info(toString());
- }
-
- @Override
- public String toString() {
- BigInteger us = new BigInteger(1, _us.getData());
- StringBuilder buf = new StringBuilder(1024);
- buf.append("Bucket set rooted on: ").append(us.toString()).append(" (aka ").append(us.toString(2)).append("): \n");
- for (int i = 0; i < NUM_BUCKETS; i++) {
- buf.append("* Bucket ").append(i).append("/").append(NUM_BUCKETS-1).append(": )\n");
- buf.append("Start: ").append("2^").append(_buckets[i].getRangeBegin()).append(")\n");
- buf.append("End: ").append("2^").append(_buckets[i].getRangeEnd()).append(")\n");
- buf.append("Contents:").append(_buckets[i].toString()).append("\n");
- }
-
- return buf.toString();
- }
-
- final static String toString(byte b[]) {
- byte val[] = new byte[Hash.HASH_LENGTH];
- if (b.length < 32)
- System.arraycopy(b, 0, val, Hash.HASH_LENGTH-b.length-1, b.length);
- else
- System.arraycopy(b, Hash.HASH_LENGTH-b.length, val, 0, val.length);
- StringBuilder buf = new StringBuilder(KEYSIZE_BITS);
- for (int i = 0; i < val.length; i++) {
- for (int j = 7; j >= 0; j--) {
- boolean bb = (0 != (val[i] & (1< _kb; // peer hashes sorted into kbuckets, but within kbuckets, unsorted
private DataStore _ds; // hash to DataStructure mapping, persisted when necessary
/** where the data store is pushing the data */
private String _dbDir;
@@ -132,7 +137,14 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
*/
protected final static long PUBLISH_JOB_DELAY = 5*60*1000l;
- private static final int MAX_EXPLORE_QUEUE = 128;
+ static final int MAX_EXPLORE_QUEUE = 128;
+
+ /**
+ * kad K
+ * Was 500 in old implementation but that was with B ~= -8!
+ */
+ private static final int BUCKET_SIZE = 16;
+ private static final int KAD_B = 3;
public KademliaNetworkDatabaseFacade(RouterContext context) {
_context = context;
@@ -168,7 +180,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
return _reseedChecker;
}
-    KBucketSet getKBuckets() { return _kb; }
+    KBucketSet<Hash> getKBuckets() { return _kb; }
DataStore getDataStore() { return _ds; }
long getLastExploreNewDate() { return _lastExploreNew; }
@@ -185,13 +197,13 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
return Collections.unmodifiableSet(_exploreKeys);
}
-    public void removeFromExploreKeys(Set<Hash> toRemove) {
+    public void removeFromExploreKeys(Collection<Hash> toRemove) {
if (!_initialized) return;
_exploreKeys.removeAll(toRemove);
_context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size(), 0);
}
-    public void queueForExploration(Set<Hash> keys) {
+    public void queueForExploration(Collection<Hash> keys) {
if (!_initialized) return;
for (Iterator iter = keys.iterator(); iter.hasNext() && _exploreKeys.size() < MAX_EXPLORE_QUEUE; ) {
_exploreKeys.add(iter.next());
@@ -240,7 +252,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_log.info("Starting up the kademlia network database");
RouterInfo ri = _context.router().getRouterInfo();
String dbDir = _context.getProperty(PROP_DB_DIR, DEFAULT_DB_DIR);
- _kb = new KBucketSet(_context, ri.getIdentity().getHash());
+ _kb = new KBucketSet(_context, ri.getIdentity().getHash(),
+ BUCKET_SIZE, KAD_B, new RejectTrimmer());
try {
_ds = new PersistentDataStore(_context, dbDir, this);
} catch (IOException ioe) {
@@ -368,7 +381,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
return count.size();
}
-    private class CountRouters implements SelectionCollector {
+    private class CountRouters implements SelectionCollector<Hash> {
private int _count;
public int size() { return _count; }
public void add(Hash entry) {
@@ -1042,4 +1055,13 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
_context.jobQueue().addJob(new StoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
}
+
+ /**
+ * Debug info, HTML formatted
+ * @since 0.9.10
+ */
+ @Override
+ public void renderStatusHTML(Writer out) throws IOException {
+        out.write(_kb.toString().replace("\n", "<br>\n"));
+ }
}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java
index 1dc2de75e..f73eb00fe 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java
@@ -17,6 +17,8 @@ import java.util.TreeMap;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
+import net.i2p.kademlia.KBucketSet;
+import net.i2p.kademlia.SelectionCollector;
import net.i2p.router.RouterContext;
import net.i2p.router.util.HashDistance;
import net.i2p.util.Log;
@@ -41,7 +43,7 @@ class PeerSelector {
*
* @return ordered list of Hash objects
*/
-    List<Hash> selectMostReliablePeers(Hash key, int numClosest, Set<Hash> alreadyChecked, KBucketSet kbuckets) {
+    List<Hash> selectMostReliablePeers(Hash key, int numClosest, Set<Hash> alreadyChecked, KBucketSet<Hash> kbuckets) {
// get the peers closest to the key
return selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets);
}
@@ -54,7 +56,7 @@ class PeerSelector {
*
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
*/
-    List<Hash> selectNearestExplicit(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
+    List<Hash> selectNearestExplicit(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
//if (true)
return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets);
@@ -94,7 +96,7 @@ class PeerSelector {
*
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
*/
-    List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
+    List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
if (peersToIgnore == null)
peersToIgnore = new HashSet(1);
peersToIgnore.add(_context.routerHash());
@@ -109,7 +111,7 @@ class PeerSelector {
}
/** UNUSED */
-    private class MatchSelectionCollector implements SelectionCollector {
+    private class MatchSelectionCollector implements SelectionCollector<Hash> {
        private TreeMap<BigInteger, Hash> _sorted;
        private Hash _key;
        private Set<Hash> _toIgnore;
@@ -200,7 +202,7 @@ class PeerSelector {
* @param peersToIgnore can be null
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
*/
-    List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
+    List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
// sure, this may not be exactly correct per kademlia (peers on the border of a kbucket in strict kademlia
// would behave differently) but I can see no reason to keep around an /additional/ more complicated algorithm.
// later if/when selectNearestExplicit gets costly, we may revisit this (since kbuckets let us cache the distance()
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchState.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchState.java
index 9a4f99c43..62343ae9e 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchState.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchState.java
@@ -10,6 +10,7 @@ import java.util.Set;
import java.util.TreeSet;
import net.i2p.data.Hash;
+import net.i2p.kademlia.XORComparator;
import net.i2p.router.RouterContext;
/**
@@ -61,7 +62,7 @@ class SearchState {
    private Set<Hash> locked_getClosest(Set<Hash> peers, int max, Hash target) {
if (_attemptedPeers.size() <= max)
return new HashSet(_attemptedPeers);
-        TreeSet<Hash> closest = new TreeSet<Hash>(new XORComparator(target));
+        TreeSet<Hash> closest = new TreeSet<Hash>(new XORComparator<Hash>(target));
closest.addAll(_attemptedPeers);
Set rv = new HashSet(max);
int i = 0;
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SelectionCollector.java b/router/java/src/net/i2p/router/networkdb/kademlia/SelectionCollector.java
deleted file mode 100644
index 020da4de4..000000000
--- a/router/java/src/net/i2p/router/networkdb/kademlia/SelectionCollector.java
+++ /dev/null
@@ -1,10 +0,0 @@
-package net.i2p.router.networkdb.kademlia;
-
-import net.i2p.data.Hash;
-
-/**
- * Visit kbuckets, gathering matches
- */
-interface SelectionCollector {
- public void add(Hash entry);
-}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
index 72f6315ae..d0f3c6ddc 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
@@ -19,6 +19,7 @@ import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
+import net.i2p.kademlia.KBucketSet;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
@@ -233,7 +234,7 @@ class StoreJob extends JobImpl {
private List getClosestFloodfillRouters(Hash key, int numClosest, Set alreadyChecked) {
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
-        KBucketSet ks = _facade.getKBuckets();
+        KBucketSet<Hash> ks = _facade.getKBuckets();
if (ks == null) return new ArrayList();
return ((FloodfillPeerSelector)_peerSelector).selectFloodfillParticipants(rkey, numClosest, alreadyChecked, ks);
}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/XORComparator.java b/router/java/src/net/i2p/router/networkdb/kademlia/XORComparator.java
deleted file mode 100644
index cd734b685..000000000
--- a/router/java/src/net/i2p/router/networkdb/kademlia/XORComparator.java
+++ /dev/null
@@ -1,36 +0,0 @@
-package net.i2p.router.networkdb.kademlia;
-
-import java.util.Comparator;
-
-import net.i2p.data.Hash;
-
-/**
- * Help sort Hashes in relation to a base key using the XOR metric.
- */
-class XORComparator implements Comparator<Hash> {
- private final byte[] _base;
-
- /**
- * @param target key to compare distances with
- */
- public XORComparator(Hash target) {
- _base = target.getData();
- }
-
- /**
- * getData() of args must be non-null
- */
- public int compare(Hash lhs, Hash rhs) {
- byte lhsb[] = lhs.getData();
- byte rhsb[] = rhs.getData();
- for (int i = 0; i < _base.length; i++) {
- int ld = (lhsb[i] ^ _base[i]) & 0xff;
- int rd = (rhsb[i] ^ _base[i]) & 0xff;
- if (ld < rd)
- return -1;
- if (ld > rd)
- return 1;
- }
- return 0;
- }
-}