* Hash: Move caching XOR methods only used by KBucket into netdb

Author: zzz
Date:   2010-05-15 14:21:31 +00:00
Parent: e1eafa2394
Commit: 2c26b8d422
5 changed files with 222 additions and 185 deletions

KBucket.java

@@ -76,5 +76,5 @@ interface KBucket {
      */
     public Hash generateRandomKey();
-    public Hash getLocal();
+    public LocalHash getLocal();
 }
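
Note: LocalHash extends Hash, so narrowing the return type of getLocal() does not break
Hash-typed callers; only KBucketImpl itself relies on the extra caching. A hedged sketch
of a caller (hypothetical helper, not part of this commit; assumes net.i2p.data.DataHelper
and the kademlia package are imported):

    // Hypothetical caller: the narrowed return type is still assignable to Hash.
    static byte[] distanceFromLocal(KBucket bucket, Hash key) {
        Hash local = bucket.getLocal();   // a LocalHash, usable as a plain Hash as before
        return DataHelper.xor(local.getData(), key.getData());
    }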

KBucketImpl.java

@@ -39,7 +39,7 @@ class KBucketImpl implements KBucket {
      */
     private final Set<Hash> _entries;
     /** we center the kbucket set on the given hash, and derive distances from this */
-    private Hash _local;
+    private LocalHash _local;
     /** include if any bits equal or higher to this bit (in big endian order) */
     private int _begin;
     /** include if no bits higher than this bit (inclusive) are set */
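
The fields above capture the Kademlia layout: a bucket holds entries whose XOR distance
from _local has its highest set bit inside the [_begin, _end] range. A hedged illustration
of that relationship (helper name and exact bit convention are assumptions, not code from
this commit):

    /** Illustrative only: index of the highest set bit (0 = lowest-order bit)
     *  in a big-endian XOR distance, or -1 if the distance is all zeroes. */
    static int highestSetBit(byte[] distance) {
        for (int i = 0; i < distance.length; i++) {
            int b = distance[i] & 0xFF;
            if (b != 0)
                return (distance.length - 1 - i) * 8 + (31 - Integer.numberOfLeadingZeros(b));
        }
        return -1;
    }
    // A key belongs to this bucket roughly when
    // _begin <= highestSetBit(xor(_local, key)) <= _end.
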
@@ -49,7 +49,7 @@ class KBucketImpl implements KBucket {
     private static final int SHUFFLE_DELAY = 10*60*1000;
     private I2PAppContext _context;
-    public KBucketImpl(I2PAppContext context, Hash local) {
+    public KBucketImpl(I2PAppContext context, LocalHash local) {
         _context = context;
         _log = context.logManager().getLog(KBucketImpl.class);
         _entries = new ConcurrentHashSet(2); //all but the last 1 or 2 buckets will be empty
@@ -57,6 +57,11 @@ class KBucketImpl implements KBucket {
         setLocal(local);
     }
+
+    /** for testing - use above constructor for production to get common caching */
+    public KBucketImpl(I2PAppContext context, Hash local) {
+        this(context, new LocalHash(local));
+    }
 
     public int getRangeBegin() { return _begin; }
     public int getRangeEnd() { return _end; }
     public void setRange(int lowOrderBitLimit, int highOrderBitLimit) {
@@ -67,8 +72,8 @@ class KBucketImpl implements KBucket {
         return _entries.size();
     }
-    public Hash getLocal() { return _local; }
-    private void setLocal(Hash local) {
+    public LocalHash getLocal() { return _local; }
+    private void setLocal(LocalHash local) {
         _local = local;
         // we want to make sure we've got the cache in place before calling cachedXor
         _local.prepareCache();
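
The prepare-before-use contract noted above is strict: cachedXor() throws
IllegalStateException if the cache was never allocated. A minimal hedged sketch of the
intended call order (hypothetical values, not commit code):

    LocalHash local = new LocalHash(new byte[Hash.HASH_LENGTH]);  // all zeroes
    Hash key = new Hash(new byte[Hash.HASH_LENGTH]);
    local.prepareCache();                    // must happen before any cachedXor() call
    byte[] distance = local.cachedXor(key);  // computed once, then served from the cache
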
@@ -378,7 +383,7 @@ class KBucketImpl implements KBucket {
         int low = 1;
         int high = 3;
         Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
-        Hash local = Hash.FAKE_HASH;
+        LocalHash local = new LocalHash(Hash.FAKE_HASH);
         local.prepareCache();
         KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
         bucket.setRange(low, high);
@@ -415,7 +420,7 @@ class KBucketImpl implements KBucket {
         int high = 200;
         byte hash[] = new byte[Hash.HASH_LENGTH];
         RandomSource.getInstance().nextBytes(hash);
-        Hash local = new Hash(hash);
+        LocalHash local = new LocalHash(hash);
         local.prepareCache();
         KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
         bucket.setRange(low, high);
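
These hunks update the class's self-test code; in production the shared LocalHash comes
from KBucketSet, while tests can use the new convenience constructor that wraps a plain
Hash. A hedged sketch of both paths (variable names are illustrative only):

    I2PAppContext ctx = I2PAppContext.getGlobalContext();
    byte[] b = new byte[Hash.HASH_LENGTH];
    RandomSource.getInstance().nextBytes(b);
    KBucketImpl testBucket = new KBucketImpl(ctx, new Hash(b));       // test-only path
    KBucketImpl prodBucket = new KBucketImpl(ctx, new LocalHash(b));  // production-style path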

KBucketSet.java

@@ -27,7 +27,7 @@ import net.i2p.util.Log;
 class KBucketSet {
     private Log _log;
     private I2PAppContext _context;
-    private Hash _us;
+    private LocalHash _us;
     private KBucket _buckets[];
     private volatile int _size;
@@ -38,7 +38,7 @@ class KBucketSet {
     public final static int BUCKET_SIZE = 500; // # values at which we start periodic trimming (500 ~= 250Kb)
     public KBucketSet(I2PAppContext context, Hash us) {
-        _us = us;
+        _us = new LocalHash(us);
         _context = context;
         _log = context.logManager().getLog(KBucketSet.class);
         createBuckets();
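
Wrapping the router's Hash once here means every bucket created by this set can share a
single LocalHash, and therefore a single XOR cache. A hedged sketch of the effect
(assuming createBuckets() hands the same _us to each bucket, as the "common caching"
comment in KBucketImpl suggests; names and the bucket count are illustrative):

    LocalHash us = new LocalHash(routerHash);          // routerHash: a hypothetical Hash
    int numBuckets = Hash.HASH_LENGTH * 8;             // assumed bucket count, for illustration
    KBucket[] buckets = new KBucket[numBuckets];
    for (int i = 0; i < buckets.length; i++)
        buckets[i] = new KBucketImpl(context, us);     // shared instance -> shared cache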

LocalHash.java (new file)

@@ -0,0 +1,208 @@
package net.i2p.router.networkdb.kademlia;
/*
 * free (adj.): unencumbered; not under the control of others
 * Written by jrandom in 2003 and released into the public domain
 * with no warranty of any kind, either expressed or implied.
 * It probably won't make your computer catch on fire, or eat
 * your children, but it might. Use at your own risk.
 *
 */

import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.util.Log;

/**
 * Pull the caching used only by KBucketImpl out of Hash and put it here.
 *
 * @since 0.7.14
 * @author jrandom
 * @author moved from Hash.java by zzz
 */
class LocalHash extends Hash {
    private final static Log _log = new Log(LocalHash.class);
    private /* FIXME final FIXME */ Map _xorCache;

    private static final int MAX_CACHED_XOR = 1024;

    public LocalHash(Hash h) {
        super(h.getData());
    }

    public LocalHash(byte[] b) {
        super(b);
    }
    /**
     * Prepare this hash's cache for xor values - very few hashes will need it,
     * so we don't want to waste the memory, and lazy initialization would incur
     * online overhead to verify the initialization.
     *
     */
    public void prepareCache() {
        synchronized (this) {
            if (_xorCache == null)
                _xorCache = new HashMap(MAX_CACHED_XOR);
        }
    }
    /**
     * Calculate the xor with the current object and the specified hash,
     * caching values where possible. Currently this keeps up to MAX_CACHED_XOR
     * (1024) entries, and uses an essentially random ejection policy. Later
     * perhaps go for an LRU or FIFO?
     *
     * @throws IllegalStateException if you try to use the cache without first
     *                               preparing this object's cache via .prepareCache()
     */
    public byte[] cachedXor(Hash key) throws IllegalStateException {
        if (_xorCache == null)
            throw new IllegalStateException("To use the cache, you must first prepare it");
        byte[] distance = (byte[])_xorCache.get(key);

        if (distance == null) {
            // not cached, lets cache it
            int cached = 0;
            synchronized (_xorCache) {
                int toRemove = _xorCache.size() + 1 - MAX_CACHED_XOR;
                if (toRemove > 0) {
                    Set keys = new HashSet(toRemove);
                    // this removes essentially random keys - we dont maintain any sort
                    // of LRU or age. perhaps we should?
                    int removed = 0;
                    for (Iterator iter = _xorCache.keySet().iterator(); iter.hasNext() && removed < toRemove; removed++)
                        keys.add(iter.next());
                    for (Iterator iter = keys.iterator(); iter.hasNext(); )
                        _xorCache.remove(iter.next());
                }
                distance = DataHelper.xor(key.getData(), getData());
                _xorCache.put(key, (Object) distance);
                cached = _xorCache.size();
            }
            if (_log.shouldLog(Log.DEBUG)) {
                // explicit buffer, since the compiler can't guess how long it'll be
                StringBuilder buf = new StringBuilder(128);
                buf.append("miss [").append(cached).append("] from ");
                buf.append(DataHelper.toHexString(getData())).append(" to ");
                buf.append(DataHelper.toHexString(key.getData()));
                _log.debug(buf.toString(), new Exception());
            }
        } else {
            if (_log.shouldLog(Log.DEBUG)) {
                // explicit buffer, since the compiler can't guess how long it'll be
                StringBuilder buf = new StringBuilder(128);
                buf.append("hit from ");
                buf.append(DataHelper.toHexString(getData())).append(" to ");
                buf.append(DataHelper.toHexString(key.getData()));
                _log.debug(buf.toString());
            }
        }
        return distance;
    }
    /** @deprecated unused */
    public void clearXorCache() {
        _xorCache = null;
    }
/********
    public static void main(String args[]) {
        testFill();
        testOverflow();
        testFillCheck();
    }

    private static void testFill() {
        Hash local = new Hash(new byte[HASH_LENGTH]); // all zeroes
        local.prepareCache();
        for (int i = 0; i < MAX_CACHED_XOR; i++) {
            byte t[] = new byte[HASH_LENGTH];
            for (int j = 0; j < HASH_LENGTH; j++)
                t[j] = (byte)((i >> j) & 0xFF);
            Hash cur = new Hash(t);
            local.cachedXor(cur);
            if (local._xorCache.size() != i+1) {
                _log.error("xor cache size where i=" + i + " isn't correct! size = "
                           + local._xorCache.size());
                return;
            }
        }
        _log.debug("Fill test passed");
    }

    private static void testOverflow() {
        Hash local = new Hash(new byte[HASH_LENGTH]); // all zeroes
        local.prepareCache();
        for (int i = 0; i < MAX_CACHED_XOR*2; i++) {
            byte t[] = new byte[HASH_LENGTH];
            for (int j = 0; j < HASH_LENGTH; j++)
                t[j] = (byte)((i >> j) & 0xFF);
            Hash cur = new Hash(t);
            local.cachedXor(cur);
            if (i < MAX_CACHED_XOR) {
                if (local._xorCache.size() != i+1) {
                    _log.error("xor cache size where i=" + i + " isn't correct! size = "
                               + local._xorCache.size());
                    return;
                }
            } else {
                if (local._xorCache.size() > MAX_CACHED_XOR) {
                    _log.error("xor cache size where i=" + i + " isn't correct! size = "
                               + local._xorCache.size());
                    return;
                }
            }
        }
        _log.debug("overflow test passed");
    }

    private static void testFillCheck() {
        Set hashes = new HashSet();
        Hash local = new Hash(new byte[HASH_LENGTH]); // all zeroes
        local.prepareCache();
        // fill 'er up
        for (int i = 0; i < MAX_CACHED_XOR; i++) {
            byte t[] = new byte[HASH_LENGTH];
            for (int j = 0; j < HASH_LENGTH; j++)
                t[j] = (byte)((i >> j) & 0xFF);
            Hash cur = new Hash(t);
            hashes.add(cur);
            local.cachedXor(cur);
            if (local._xorCache.size() != i+1) {
                _log.error("xor cache size where i=" + i + " isn't correct! size = "
                           + local._xorCache.size());
                return;
            }
        }
        // now lets recheck using those same hash objects
        // and see if they're cached
        for (Iterator iter = hashes.iterator(); iter.hasNext(); ) {
            Hash cur = (Hash)iter.next();
            if (!local._xorCache.containsKey(cur)) {
                _log.error("checking the cache, we dont have "
                           + DataHelper.toHexString(cur.getData()));
                return;
            }
        }
        // now lets recheck with new objects but the same values
        // and see if they're cached
        for (int i = 0; i < MAX_CACHED_XOR; i++) {
            byte t[] = new byte[HASH_LENGTH];
            for (int j = 0; j < HASH_LENGTH; j++)
                t[j] = (byte)((i >> j) & 0xFF);
            Hash cur = new Hash(t);
            if (!local._xorCache.containsKey(cur)) {
                _log.error("checking the cache, we do NOT have "
                           + DataHelper.toHexString(cur.getData()));
                return;
            }
        }
        _log.debug("Fill check test passed");
    }
*********/
}
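
A short usage sketch of the new class (hypothetical demo, mirroring the commented-out
tests above; it must live in net.i2p.router.networkdb.kademlia because LocalHash is
package-private):

    package net.i2p.router.networkdb.kademlia;

    import java.util.Arrays;
    import net.i2p.data.Hash;

    public class LocalHashDemo {
        public static void main(String[] args) {
            LocalHash local = new LocalHash(new byte[Hash.HASH_LENGTH]); // all zeroes
            local.prepareCache();                      // required before cachedXor()
            byte[] k = new byte[Hash.HASH_LENGTH];
            k[0] = 0x01;
            Hash other = new Hash(k);
            byte[] first = local.cachedXor(other);     // miss: XOR computed and cached
            byte[] second = local.cachedXor(other);    // hit: served from the cache
            System.out.println(Arrays.equals(first, second));  // prints "true"
            // Once more than MAX_CACHED_XOR (1024) distinct keys have been seen,
            // essentially random entries are evicted to cap memory use.
        }
    }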