propagate from branch 'i2p.i2p' (head cbfe85d22c4d0b05c901db3fa751b57889d9b2d6)

to branch 'i2p.i2p.str4d.cleanup' (head 94fe1764f50b459da18222434034ad46d604c7a1)
str4d
2013-11-28 11:12:41 +00:00
22 changed files with 35 additions and 79 deletions


@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 34;
+    public final static long BUILD = 35;
     /** for example "-test" */
     public final static String EXTRA = "-rc";


@@ -9,7 +9,6 @@ package net.i2p.router.networkdb.kademlia;
  */
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.Set;
 import net.i2p.data.Hash;
@@ -62,8 +61,7 @@ class ExploreKeySelectorJob extends JobImpl {
             KBucket bucket = _facade.getKBuckets().getBucket(i);
             if (bucket.getKeyCount() < KBucketSet.BUCKET_SIZE) {
                 boolean already = false;
-                for (Iterator<Hash> iter = alreadyQueued.iterator(); iter.hasNext(); ) {
-                    Hash key = iter.next();
+                for (Hash key : alreadyQueued) {
                     if (bucket.shouldContain(key)) {
                         already = true;
                         _log.debug("Bucket " + i + " is already queued for exploration \t" + key);


@@ -1,7 +1,6 @@
 package net.i2p.router.networkdb.kademlia;
 import java.util.ArrayList;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -58,10 +57,8 @@ class HarvesterJob extends JobImpl {
     public void runJob() {
         if (shouldHarvest()) {
             List<Hash> peers = selectPeersToUpdate();
-            for (int i = 0; i < peers.size(); i++) {
-                Hash peer= peers.get(i);
+            for (Hash peer : peers)
                 harvest(peer);
-            }
         }
         requeue(REQUEUE_DELAY);
     }
@@ -79,8 +76,7 @@ class HarvesterJob extends JobImpl {
         Map<Long, Hash> routersByAge = new TreeMap<Long, Hash>();
         Set<Hash> peers = _facade.getAllRouters();
         long now = getContext().clock().now();
-        for (Iterator<Hash> iter = peers.iterator(); iter.hasNext(); ) {
-            Hash peer = iter.next();
+        for (Hash peer : peers) {
             RouterInfo info = _facade.lookupRouterInfoLocally(peer);
             if (info != null) {
                 long when = info.getPublished();
@@ -96,8 +92,7 @@ class HarvesterJob extends JobImpl {
         // ignoring peers that are new, so lets grab the oldest MAX_PER_RUN
         // entries
         List<Hash> rv = new ArrayList<Hash>();
-        for (Iterator<Hash> iter = routersByAge.values().iterator(); iter.hasNext(); ) {
-            Hash peer = iter.next();
+        for (Hash peer : routersByAge.values()) {
             rv.add(peer);
             if (rv.size() >= MAX_PER_RUN)
                 break;


@@ -10,7 +10,6 @@ package net.i2p.router.networkdb.kademlia;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
@@ -290,8 +289,7 @@ class SearchJob extends JobImpl {
             return;
         } else {
             attempted.addAll(closestHashes);
-            for (Iterator<Hash> iter = closestHashes.iterator(); iter.hasNext(); ) {
-                Hash peer = iter.next();
+            for (Hash peer : closestHashes) {
                 DatabaseEntry ds = _facade.getDataStore().get(peer);
                 if (ds == null) {
                     if (_log.shouldLog(Log.INFO))
@@ -630,8 +628,7 @@ class SearchJob extends JobImpl {
         Set<Hash> sendTo = _state.getRepliedPeers(); // _state.getFailed();
         sendTo.addAll(_state.getPending());
         int numSent = 0;
-        for (Iterator<Hash> iter = sendTo.iterator(); iter.hasNext(); ) {
-            Hash peer = iter.next();
+        for (Hash peer : sendTo) {
             RouterInfo peerInfo = _facade.lookupRouterInfoLocally(peer);
             if (peerInfo == null) continue;
             if (resend(peerInfo, (LeaseSet)ds))


@@ -1,7 +1,6 @@
 package net.i2p.router.peermanager;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
@@ -73,8 +72,7 @@ public class PeerTestJob extends JobImpl {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Testing " + peers.size() + " peers");
-        for (Iterator<RouterInfo> iter = peers.iterator(); iter.hasNext(); ) {
-            RouterInfo peer = iter.next();
+        for (RouterInfo peer : peers) {
             if (_log.shouldLog(Log.DEBUG))
                 _log.debug("Testing peer " + peer.getIdentity().getHash().toBase64());
             testPeer(peer);
@@ -98,8 +96,7 @@ public class PeerTestJob extends JobImpl {
_log.debug("Peer selection found " + peerHashes.size() + " peers");
Set<RouterInfo> peers = new HashSet<RouterInfo>(peerHashes.size());
for (Iterator<Hash> iter = peerHashes.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
for (Hash peer : peerHashes) {
RouterInfo peerInfo = getContext().netDb().lookupRouterInfoLocally(peer);
if (peerInfo != null) {
peers.add(peerInfo);