findbugs netdb

zzz
2011-01-10 16:11:33 +00:00
parent f15ad23482
commit e4bb053a61
9 changed files with 21 additions and 32 deletions


@@ -85,9 +85,9 @@ class HarvesterJob extends JobImpl {
long when = info.getPublished();
if (when + MIN_UPDATE_FREQUENCY > now)
continue;
- while (routersByAge.containsKey(new Long(when)))
+ while (routersByAge.containsKey(Long.valueOf(when)))
when++;
- routersByAge.put(new Long(when), info.getIdentity().getHash());
+ routersByAge.put(Long.valueOf(when), info.getIdentity().getHash());
}
}

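Note on the change above: new Long(when) always allocates a fresh wrapper object, while Long.valueOf(when) may return a cached instance (values from -128 to 127 are guaranteed to be cached), which is the pattern findbugs reports as DM_NUMBER_CTOR. The same valueOf replacement shows up again in SearchState and StoreState below. A small self-contained illustration of the practical difference; the class name is made up and not part of the router code:

// BoxingDemo is illustrative only.
public class BoxingDemo {
    public static void main(String[] args) {
        Long a = Long.valueOf(42);    // may come from the wrapper cache; -128..127 is always cached
        Long b = Long.valueOf(42);
        Long c = new Long(42);        // always a new allocation (deprecated in later JDKs)
        Long d = new Long(42);
        System.out.println(a == b);      // true: same cached instance
        System.out.println(c == d);      // false: two distinct objects
        System.out.println(c.equals(d)); // true: value equality is unaffected
    }
}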

@@ -387,15 +387,15 @@ class KBucketImpl implements KBucket {
local.prepareCache();
KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
bucket.setRange(low, high);
- Hash lowerBoundKey = bucket.getRangeBeginKey();
- Hash upperBoundKey = bucket.getRangeEndKey();
+ //Hash lowerBoundKey = bucket.getRangeBeginKey();
+ //Hash upperBoundKey = bucket.getRangeEndKey();
for (int i = 0; i < 100000; i++) {
Hash rnd = bucket.generateRandomKey();
//buf.append(toString(rnd.getData())).append('\n');
boolean ok = bucket.shouldContain(rnd);
if (!ok) {
byte diff[] = bucket.getLocal().cachedXor(rnd);
- BigInteger dv = new BigInteger(1, diff);
+ //BigInteger dv = new BigInteger(1, diff);
//log.error("WTF! bucket doesn't want: \n" + toString(rnd.getData())
// + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2)
// + "\nBucket: \n"+bucket, new Exception("WTF"));
@@ -403,7 +403,7 @@ class KBucketImpl implements KBucket {
log.error("\nLow: " + DataHelper.toHexString(bucket.getRangeBeginKey().getData())
+ "\nVal: " + DataHelper.toHexString(rnd.getData())
+ "\nHigh:" + DataHelper.toHexString(bucket.getRangeEndKey().getData()));
- try { Thread.sleep(1000); } catch (Exception e) {}
+ try { Thread.sleep(1000); } catch (InterruptedException e) {}
System.exit(0);
} else {
//_log.debug("Ok, bucket wants: \n" + toString(rnd.getData()));
@@ -415,7 +415,6 @@ class KBucketImpl implements KBucket {
private static void testRand2() {
Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
- StringBuilder buf = new StringBuilder(1024*1024*16);
int low = 1;
int high = 200;
byte hash[] = new byte[Hash.HASH_LENGTH];
@@ -424,15 +423,15 @@ class KBucketImpl implements KBucket {
local.prepareCache();
KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
bucket.setRange(low, high);
- Hash lowerBoundKey = bucket.getRangeBeginKey();
- Hash upperBoundKey = bucket.getRangeEndKey();
+ //Hash lowerBoundKey = bucket.getRangeBeginKey();
+ //Hash upperBoundKey = bucket.getRangeEndKey();
for (int i = 0; i < 100000; i++) {
Hash rnd = bucket.generateRandomKey();
//buf.append(toString(rnd.getData())).append('\n');
boolean ok = bucket.shouldContain(rnd);
if (!ok) {
byte diff[] = bucket.getLocal().cachedXor(rnd);
- BigInteger dv = new BigInteger(1, diff);
+ //BigInteger dv = new BigInteger(1, diff);
//log.error("WTF! bucket doesn't want: \n" + toString(rnd.getData())
// + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2)
// + "\nBucket: \n"+bucket, new Exception("WTF"));
@@ -440,13 +439,13 @@ class KBucketImpl implements KBucket {
log.error("\nLow: " + DataHelper.toHexString(bucket.getRangeBeginKey().getData())
+ "\nVal: " + DataHelper.toHexString(rnd.getData())
+ "\nHigh:" + DataHelper.toHexString(bucket.getRangeEndKey().getData()));
- try { Thread.sleep(1000); } catch (Exception e) {}
+ try { Thread.sleep(1000); } catch (InterruptedException e) {}
System.exit(0);
} else {
//_log.debug("Ok, bucket wants: \n" + toString(rnd.getData()));
}
}
log.info("Passed 100,000 random key generations against a random hash\n" + buf.toString());
log.info("Passed 100,000 random key generations against a random hash");
}
private final static String toString(byte b[]) {

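The two catch blocks above were narrowed from Exception to InterruptedException, the only checked exception Thread.sleep() declares; catching the broad Exception type is what findbugs reports as REC_CATCH_EXCEPTION and can swallow unrelated failures. In code that keeps running after the sleep (unlike this test harness, which exits), the usual pattern is to restore the interrupt flag as well; a minimal sketch with an illustrative method name:

// SleepDemo is a stand-in example, not router code.
public class SleepDemo {
    static void pause(long millis) {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            // Re-assert the flag so callers further up can still see the interrupt.
            Thread.currentThread().interrupt();
        }
    }

    public static void main(String[] args) {
        pause(100);
        System.out.println("done");
    }
}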

@@ -88,9 +88,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
void searchComplete(Hash key) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("search Complete: " + key);
- SearchJob removed = null;
synchronized (_activeRequests) {
- removed = (SearchJob)_activeRequests.remove(key);
+ _activeRequests.remove(key);
}
}

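The searchComplete() change above removes a dead local store (findbugs DLS_DEAD_LOCAL_STORE): the SearchJob returned by remove() was assigned to a variable that was never read, so the call is now made for its side effect only. A contrived standalone example of the same pattern, using plain strings instead of the router's types:

import java.util.HashMap;
import java.util.Map;

// DeadStoreDemo is illustrative only.
public class DeadStoreDemo {
    public static void main(String[] args) {
        Map<String, String> active = new HashMap<>();

        active.put("key", "job");
        String removed = active.remove("key"); // dead store: 'removed' is never read again

        active.put("key", "job");
        active.remove("key");                  // same effect, no unused local
        System.out.println(active.isEmpty());  // true
    }
}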

@@ -58,13 +58,15 @@ class LookupThrottler {
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof ReplyTunnel))
return false;
return this.h.equals(((ReplyTunnel)obj).h) &&
this.id.equals(((ReplyTunnel)obj).id);
}
@Override
public int hashCode() {
- return this.h.hashCode() + this.id.hashCode();
+ return this.h.hashCode() ^ this.id.hashCode();
}
}
}

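For the ReplyTunnel key above, the point is that hashCode() stays consistent with equals(): both now depend on exactly the same two fields (h and id), with XOR rather than addition combining the two component hashes. Any deterministic function of those fields satisfies the equals/hashCode contract; a common alternative is the 31-multiplier idiom, shown here purely as an illustration. ReplyTunnelKey and its field types are stand-ins, not the actual I2P classes:

// Illustrative equals/hashCode pair built from the same two fields.
public class ReplyTunnelKey {
    private final String hash;   // stand-in for the Hash field
    private final long tunnelId; // stand-in for the TunnelId field

    public ReplyTunnelKey(String hash, long tunnelId) {
        this.hash = hash;
        this.tunnelId = tunnelId;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof ReplyTunnelKey)) // instanceof already rejects null
            return false;
        ReplyTunnelKey other = (ReplyTunnelKey) obj;
        return hash.equals(other.hash) && tunnelId == other.tunnelId;
    }

    @Override
    public int hashCode() {
        // 31 * a + b tends to spread values a little better than a plain XOR
        // when the two components are correlated, but either satisfies the contract.
        return 31 * hash.hashCode() + (int) (tunnelId ^ (tunnelId >>> 32));
    }
}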

@@ -89,7 +89,7 @@ public class RepublishLeaseSetJob extends JobImpl {
return _lastPublished;
}
- class OnRepublishSuccess extends JobImpl {
+ private static class OnRepublishSuccess extends JobImpl {
public OnRepublishSuccess(RouterContext ctx) { super(ctx); }
public String getName() { return "Publish leaseSet successful"; }
public void runJob() {
@@ -98,7 +98,7 @@ public class RepublishLeaseSetJob extends JobImpl {
}
}
- class OnRepublishFailure extends JobImpl {
+ private static class OnRepublishFailure extends JobImpl {
private RepublishLeaseSetJob _job;
public OnRepublishFailure(RouterContext ctx, RepublishLeaseSetJob job) {
super(ctx);

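Making OnRepublishSuccess and OnRepublishFailure private static drops the implicit reference that every non-static inner class keeps to its enclosing instance (findbugs SIC_INNER_SHOULD_BE_STATIC). Neither class needs that reference: the failure job already receives the RepublishLeaseSetJob explicitly through its constructor. A small standalone illustration of the difference, with made-up class names:

// Outer, InnerTask and NestedTask are illustrative only.
public class Outer {
    private final byte[] payload = new byte[1024 * 1024];

    // Non-static inner class: every instance silently holds Outer.this,
    // keeping the whole Outer object (and its payload) reachable.
    class InnerTask implements Runnable {
        public void run() { System.out.println("inner task"); }
    }

    // Static nested class: no hidden reference; anything it needs is passed in explicitly.
    static class NestedTask implements Runnable {
        public void run() { System.out.println("nested task"); }
    }

    public static void main(String[] args) {
        Outer outer = new Outer();
        Runnable a = outer.new InnerTask(); // needs an Outer instance to exist
        Runnable b = new NestedTask();      // does not
        a.run();
        b.run();
        System.out.println(outer.payload.length);
    }
}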

@@ -98,7 +98,7 @@ class SearchState {
synchronized (_pendingPeers) {
_pendingPeers.addAll(pending);
for (Iterator iter = pending.iterator(); iter.hasNext(); )
- _pendingPeerTimes.put(iter.next(), new Long(_context.clock().now()));
+ _pendingPeerTimes.put(iter.next(), Long.valueOf(_context.clock().now()));
}
synchronized (_attemptedPeers) {
_attemptedPeers.addAll(pending);
@@ -107,7 +107,7 @@ class SearchState {
public void addPending(Hash peer) {
synchronized (_pendingPeers) {
_pendingPeers.add(peer);
- _pendingPeerTimes.put(peer, new Long(_context.clock().now()));
+ _pendingPeerTimes.put(peer, Long.valueOf(_context.clock().now()));
}
synchronized (_attemptedPeers) {
_attemptedPeers.add(peer);

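Besides the valueOf substitutions, the addPending(Collection) loop above still walks the pending set with a raw, untyped Iterator (the StoreState version of the same loop below already uses Iterator<Hash>). Where the element type is known, the enhanced for loop says the same thing with no raw types; a sketch with stand-in field and type names:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// PendingTimesDemo is illustrative; String stands in for Hash.
public class PendingTimesDemo {
    public static void main(String[] args) {
        Set<String> pending = new HashSet<>();
        pending.add("peerA");
        pending.add("peerB");

        Map<String, Long> pendingPeerTimes = new HashMap<>();
        long now = System.currentTimeMillis();

        // Same effect as the explicit Iterator loop, without the raw type.
        for (String peer : pending)
            pendingPeerTimes.put(peer, Long.valueOf(now));

        System.out.println(pendingPeerTimes);
    }
}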

@@ -443,7 +443,6 @@ class StoreJob extends JobImpl {
sent = wm.getMessage();
_state.addPending(to, wm);
} else {
- sent = msg;
_state.addPending(to);
// now that almost all floodfills are at 0.7.10,
// just refuse to store unencrypted to older ones.


@@ -114,7 +114,7 @@ class StoreState {
public void addPending(Hash peer) {
synchronized (_pendingPeers) {
_pendingPeers.add(peer);
- _pendingPeerTimes.put(peer, new Long(_context.clock().now()));
+ _pendingPeerTimes.put(peer, Long.valueOf(_context.clock().now()));
}
synchronized (_attemptedPeers) {
_attemptedPeers.add(peer);
@@ -124,7 +124,7 @@ class StoreState {
synchronized (_pendingPeers) {
_pendingPeers.addAll(pending);
for (Iterator<Hash> iter = pending.iterator(); iter.hasNext(); )
- _pendingPeerTimes.put(iter.next(), new Long(_context.clock().now()));
+ _pendingPeerTimes.put(iter.next(), Long.valueOf(_context.clock().now()));
}
synchronized (_attemptedPeers) {
_attemptedPeers.addAll(pending);


@@ -161,16 +161,6 @@ class TransientDataStore implements DataStore {
return rv;
}
- @Override
- public int hashCode() {
- return DataHelper.hashCode(_data);
- }
- @Override
- public boolean equals(Object obj) {
- if ( (obj == null) || (obj.getClass() != getClass()) ) return false;
- TransientDataStore ds = (TransientDataStore)obj;
- return DataHelper.eq(ds._data, _data);
- }
@Override
public String toString() {
StringBuilder buf = new StringBuilder();