Use for-each when an Iterator is not needed

This commit is contained in:
str4d
2013-11-28 11:56:54 +00:00
parent f112baac48
commit efe3bd2c05
21 changed files with 56 additions and 125 deletions

View File

@@ -137,8 +137,7 @@ public class Blocklist {
return;
}
}
for (Iterator<Hash> iter = _peerBlocklist.keySet().iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
for (Hash peer : _peerBlocklist.keySet()) {
String reason;
String comment = _peerBlocklist.get(peer);
if (comment != null)

View File

@@ -12,7 +12,6 @@ import java.io.IOException;
import java.io.Writer;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.LinkedBlockingQueue;
@@ -125,21 +124,15 @@ class ClientManager {
_listener.stopListening();
Set<ClientConnectionRunner> runners = new HashSet<ClientConnectionRunner>();
synchronized (_runners) {
for (Iterator<ClientConnectionRunner> iter = _runners.values().iterator(); iter.hasNext();) {
ClientConnectionRunner runner = iter.next();
for (ClientConnectionRunner runner : _runners.values())
runners.add(runner);
}
}
synchronized (_pendingRunners) {
for (Iterator<ClientConnectionRunner> iter = _pendingRunners.iterator(); iter.hasNext();) {
ClientConnectionRunner runner = iter.next();
for (ClientConnectionRunner runner : _pendingRunners)
runners.add(runner);
}
}
for (Iterator<ClientConnectionRunner> iter = runners.iterator(); iter.hasNext(); ) {
ClientConnectionRunner runner = iter.next();
for (ClientConnectionRunner runner : runners)
runner.disconnectClient(msg, Log.WARN);
}
_runnersByHash.clear();
}

View File

@@ -3,7 +3,6 @@ package net.i2p.router.networkdb.kademlia;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import net.i2p.data.Hash;
@@ -216,8 +215,8 @@ class FloodOnlySearchJob extends FloodSearchJob {
_log.info(getJobId() + ": Floodfill search for " + _key + " failed with " + timeRemaining + " remaining after " + time);
}
synchronized(_unheardFrom) {
for (Iterator<Hash> iter = _unheardFrom.iterator(); iter.hasNext(); )
getContext().profileManager().dbLookupFailed(iter.next());
for (Hash h : _unheardFrom)
getContext().profileManager().dbLookupFailed(h);
}
_facade.complete(_key);
getContext().statManager().addRateData("netDb.failedTime", time, 0);

View File

@@ -383,8 +383,8 @@ class IterativeSearchJob extends FloodSearchJob {
synchronized(this) {
tries = _unheardFrom.size() + _failedPeers.size();
// blame the unheard-from (others already blamed in failed() above)
for (Iterator<Hash> iter = _unheardFrom.iterator(); iter.hasNext(); )
getContext().profileManager().dbLookupFailed(iter.next());
for (Hash h : _unheardFrom)
getContext().profileManager().dbLookupFailed(h);
}
long time = System.currentTimeMillis() - _created;
if (_log.shouldLog(Log.INFO)) {

View File

@@ -9,7 +9,6 @@ package net.i2p.router.networkdb.kademlia;
*/
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
@@ -157,8 +156,7 @@ class StoreJob extends JobImpl {
//_state.addPending(closestHashes);
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Continue sending key " + _state.getTarget() + " after " + _state.getAttempted().size() + " tries to " + closestHashes);
for (Iterator<Hash> iter = closestHashes.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
for (Hash peer : closestHashes) {
DatabaseEntry ds = _facade.getDataStore().get(peer);
if ( (ds == null) || !(ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) ) {
if (_log.shouldLog(Log.INFO))

View File

@@ -4,7 +4,6 @@ import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
@@ -124,8 +123,8 @@ class StoreState {
public void addPending(Collection<Hash> pending) {
synchronized (_pendingPeers) {
_pendingPeers.addAll(pending);
for (Iterator<Hash> iter = pending.iterator(); iter.hasNext(); )
_pendingPeerTimes.put(iter.next(), Long.valueOf(_context.clock().now()));
for (Hash peer : pending)
_pendingPeerTimes.put(peer, Long.valueOf(_context.clock().now()));
}
synchronized (_attemptedPeers) {
_attemptedPeers.addAll(pending);
@@ -191,34 +190,26 @@ class StoreState {
buf.append(" Attempted: ");
synchronized (_attemptedPeers) {
buf.append(_attemptedPeers.size()).append(' ');
for (Iterator<Hash> iter = _attemptedPeers.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
for (Hash peer : _attemptedPeers)
buf.append(peer.toBase64()).append(" ");
}
}
buf.append(" Pending: ");
synchronized (_pendingPeers) {
buf.append(_pendingPeers.size()).append(' ');
for (Iterator<Hash> iter = _pendingPeers.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
for (Hash peer : _pendingPeers)
buf.append(peer.toBase64()).append(" ");
}
}
buf.append(" Failed: ");
synchronized (_failedPeers) {
buf.append(_failedPeers.size()).append(' ');
for (Iterator<Hash> iter = _failedPeers.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
for (Hash peer : _failedPeers)
buf.append(peer.toBase64()).append(" ");
}
}
buf.append(" Successful: ");
synchronized (_successfulPeers) {
buf.append(_successfulPeers.size()).append(' ');
for (Iterator<Hash> iter = _successfulPeers.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
for (Hash peer : _successfulPeers)
buf.append(peer.toBase64()).append(" ");
}
}
/****
buf.append(" Successful Exploratory: ");

View File

@@ -236,15 +236,13 @@ public class ProfileOrganizer {
getReadLock();
try {
for (Iterator<PeerProfile> iter = _failingPeers.values().iterator(); iter.hasNext(); ) {
PeerProfile profile = iter.next();
for (PeerProfile profile : _failingPeers.values()) {
if (profile.getLastSendSuccessful() >= hideBefore)
activePeers++;
else if (profile.getLastHeardFrom() >= hideBefore)
activePeers++;
}
for (Iterator<PeerProfile> iter = _notFailingPeers.values().iterator(); iter.hasNext(); ) {
PeerProfile profile = iter.next();
for (PeerProfile profile : _notFailingPeers.values()) {
if (profile.getLastSendSuccessful() >= hideBefore)
activePeers++;
else if (profile.getLastHeardFrom() >= hideBefore)
@@ -539,8 +537,7 @@ public class ProfileOrganizer {
if (matches.size() < howMany) {
getReadLock();
try {
for (Iterator<Hash> iter = _notFailingPeers.keySet().iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
for (Hash peer : _notFailingPeers.keySet()) {
if (!_context.commSystem().isEstablished(peer))
exclude.add(peer);
}
@@ -567,8 +564,7 @@ public class ProfileOrganizer {
Map<Hash, PeerProfile> activePeers = new HashMap<Hash, PeerProfile>();
getReadLock();
try {
for (Iterator<Map.Entry<Hash, PeerProfile>> iter = _notFailingPeers.entrySet().iterator(); iter.hasNext(); ) {
Map.Entry<Hash, PeerProfile> e = iter.next();
for (Map.Entry<Hash, PeerProfile> e : _notFailingPeers.entrySet()) {
if (_context.commSystem().isEstablished(e.getKey()))
activePeers.put(e.getKey(), e.getValue());
}
@@ -666,8 +662,7 @@ public class ProfileOrganizer {
n = new ArrayList<Hash>(_notFailingPeers.keySet());
} finally { releaseReadLock(); }
List<Hash> l = new ArrayList<Hash>(count / 4);
for (Iterator<Hash> iter = n.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
for (Hash peer : n) {
if (_context.commSystem().wasUnreachable(peer))
l.add(peer);
else {
@@ -717,8 +712,7 @@ public class ProfileOrganizer {
long cutoff = _context.clock().now() - (20*1000);
int count = _notFailingPeers.size();
List<Hash> l = new ArrayList<Hash>(count / 128);
for (Iterator<PeerProfile> iter = _notFailingPeers.values().iterator(); iter.hasNext(); ) {
PeerProfile prof = iter.next();
for (PeerProfile prof : _notFailingPeers.values()) {
if (prof.getTunnelHistory().getLastRejectedBandwidth() > cutoff)
l.add(prof.getPeer());
}
@@ -779,8 +773,7 @@ public class ProfileOrganizer {
if (shouldCoalesce) {
getReadLock();
try {
for (Iterator<PeerProfile> iter = _strictCapacityOrder.iterator(); iter.hasNext(); ) {
PeerProfile prof = iter.next();
for (PeerProfile prof : _strictCapacityOrder) {
if ( (expireOlderThan > 0) && (prof.getLastSendSuccessful() <= expireOlderThan) ) {
continue;
}
@@ -887,8 +880,7 @@ public class ProfileOrganizer {
if (numToPromote > 0) {
if (_log.shouldLog(Log.INFO))
_log.info("Need to explicitly promote " + numToPromote + " peers to the fast group");
for (Iterator<PeerProfile> iter = _strictCapacityOrder.iterator(); iter.hasNext(); ) {
PeerProfile cur = iter.next();
for (PeerProfile cur : _strictCapacityOrder) {
if ( (!_fastPeers.containsKey(cur.getPeer())) && (!cur.getIsFailing()) ) {
if (!isSelectable(cur.getPeer())) {
// skip peers we dont have in the netDb
@@ -990,8 +982,7 @@ public class ProfileOrganizer {
int needToUnfail = MIN_NOT_FAILING_ACTIVE - notFailingActive;
if (needToUnfail > 0) {
int unfailed = 0;
for (Iterator<PeerProfile> iter = _strictCapacityOrder.iterator(); iter.hasNext(); ) {
PeerProfile best = iter.next();
for (PeerProfile best : _strictCapacityOrder) {
if ( (best.getIsActive()) && (best.getIsFailing()) ) {
if (_log.shouldLog(Log.WARN))
_log.warn("All peers were failing, so we have overridden the failing flag for one of the most reliable active peers (" + best.getPeer().toBase64() + ")");
@@ -1022,9 +1013,7 @@ public class ProfileOrganizer {
double totalCapacity = 0;
double totalIntegration = 0;
Set<PeerProfile> reordered = new TreeSet<PeerProfile>(_comp);
for (Iterator<PeerProfile> iter = allPeers.iterator(); iter.hasNext(); ) {
PeerProfile profile = iter.next();
for (PeerProfile profile : allPeers) {
if (_us.equals(profile.getPeer())) continue;
// only take into account active peers that aren't failing
@@ -1072,8 +1061,7 @@ public class ProfileOrganizer {
double thresholdAtMinHighCap = 0;
double thresholdAtLowest = CapacityCalculator.GROWTH_FACTOR;
int cur = 0;
for (Iterator<PeerProfile> iter = reordered.iterator(); iter.hasNext(); ) {
PeerProfile profile = iter.next();
for (PeerProfile profile : reordered) {
double val = profile.getCapacityValue();
if (val > meanCapacity)
numExceedingMean++;
@@ -1164,8 +1152,7 @@ public class ProfileOrganizer {
private void locked_calculateSpeedThresholdMean(Set<PeerProfile> reordered) {
double total = 0;
int count = 0;
for (Iterator<PeerProfile> iter = reordered.iterator(); iter.hasNext(); ) {
PeerProfile profile = iter.next();
for (PeerProfile profile : reordered) {
if (profile.getCapacityValue() >= _thresholdCapacityValue) {
// duplicates being clobbered is fine by us
total += profile.getSpeedValue();
@@ -1524,8 +1511,7 @@ public class ProfileOrganizer {
DecimalFormat fmt = new DecimalFormat("0,000.0");
fmt.setPositivePrefix("+");
for (Iterator<Hash> iter = organizer.selectAllPeers().iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
for (Hash peer : organizer.selectAllPeers()) {
PeerProfile profile = organizer.getProfile(peer);
if (!profile.getIsActive()) {
System.out.println("Peer " + profile.getPeer().toBase64().substring(0,4)

View File

@@ -12,7 +12,6 @@ import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Vector;
@@ -233,8 +232,8 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
*/
private class QueueAll implements SimpleTimer.TimedEvent {
public void timeReached() {
for (Iterator<Hash> iter = _context.netDb().getAllRouters().iterator(); iter.hasNext(); ) {
RouterInfo ri = _context.netDb().lookupRouterInfoLocally(iter.next());
for (Hash h : _context.netDb().getAllRouters()) {
RouterInfo ri = _context.netDb().lookupRouterInfoLocally(h);
if (ri == null)
continue;
byte[] ip = getIP(ri);

View File

@@ -2114,8 +2114,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
public int countActivePeers() {
long now = _context.clock().now();
int active = 0;
for (Iterator<PeerState> iter = _peersByIdent.values().iterator(); iter.hasNext(); ) {
PeerState peer = iter.next();
for (PeerState peer : _peersByIdent.values()) {
if (now-peer.getLastReceiveTime() <= 5*60*1000)
active++;
}
@@ -2126,8 +2125,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
public int countActiveSendPeers() {
long now = _context.clock().now();
int active = 0;
for (Iterator<PeerState> iter = _peersByIdent.values().iterator(); iter.hasNext(); ) {
PeerState peer = iter.next();
for (PeerState peer : _peersByIdent.values()) {
if (now-peer.getLastSendFullyTime() <= 1*60*1000)
active++;
}