2005-08-27 jrandom

    * Minor logging and optimization tweaks in the router and SDK
    * Use ISO-8859-1 in the XML files (thanks redzara!)
    * The consolePassword config property can now be used to bypass the router
      console's nonce checking, allowing CLI restarts (see the sketch below)
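
For context on that last item: the console normally requires a per-form nonce,
and the change lets a configured password stand in for it. A minimal sketch of
that kind of check in Java - the class and parameter names here are
hypothetical, not the actual router console code:

    public class ConsoleAuthSketch {
        private final String _formNonce;        // per-form nonce embedded in the HTML
        private final String _consolePassword;  // consolePassword config value, or null if unset

        public ConsoleAuthSketch(String formNonce, String consolePassword) {
            _formNonce = formNonce;
            _consolePassword = consolePassword;
        }

        /**
         * Accept the request when the submitted nonce matches, or when the
         * caller supplied the configured consolePassword - which is what lets
         * a CLI tool trigger a restart without first scraping a nonce from
         * the HTML form.
         */
        public boolean isAuthorized(String submittedNonce, String submittedPassword) {
            if (_formNonce.equals(submittedNonce))
                return true;
            return _consolePassword != null && _consolePassword.equals(submittedPassword);
        }
    }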
jrandom
2005-08-27 22:15:35 +00:00
committed by zzz
parent e0bfdff152
commit 8660cf0d74
21 changed files with 400 additions and 70 deletions

View File

@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
  *
  */
 public class RouterVersion {
-    public final static String ID = "$Revision: 1.218 $ $Date: 2005/08/23 16:25:49 $";
+    public final static String ID = "$Revision: 1.219 $ $Date: 2005/08/24 17:55:26 $";
     public final static String VERSION = "0.6.0.3";
-    public final static long BUILD = 2;
+    public final static long BUILD = 3;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION);
         System.out.println("Router ID: " + RouterVersion.ID);

View File

@@ -146,7 +146,7 @@ public class ClientConnectionRunner {
     /** current client's sessionId */
     SessionId getSessionId() { return _sessionId; }
-    void setSessionId(SessionId id) { _sessionId = id; }
+    void setSessionId(SessionId id) { if (id != null) _sessionId = id; }
     /** data for the current leaseRequest, or null if there is no active leaseSet request */
     LeaseRequestState getLeaseRequest() { return _leaseRequest; }
     void setLeaseRequest(LeaseRequestState req) { _leaseRequest = req; }

View File

@@ -71,9 +71,6 @@ public class GarlicMessageReceiver {
                 handleClove(clove);
             }
         } else {
-            if (_log.shouldLog(Log.ERROR))
-                _log.error("CloveMessageParser failed to decrypt the message [" + message.getUniqueId()
-                           + "]");
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("CloveMessageParser failed to decrypt the message [" + message.getUniqueId()
+                          + "]", new Exception("Decrypt garlic failed"));

View File

@@ -12,6 +12,8 @@ import java.math.BigInteger;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Set;
+import java.util.ArrayList;
+import java.util.List;
 import net.i2p.I2PAppContext;
 import net.i2p.data.DataHelper;
@@ -22,7 +24,7 @@ import net.i2p.util.RandomSource;
 class KBucketImpl implements KBucket {
     private Log _log;
     /** set of Hash objects for the peers in the kbucket */
-    private Set _entries;
+    private List _entries;
     /** we center the kbucket set on the given hash, and derive distances from this */
     private Hash _local;
     /** include if any bits equal or higher to this bit (in big endian order) */
@@ -34,7 +36,7 @@ class KBucketImpl implements KBucket {
     public KBucketImpl(I2PAppContext context, Hash local) {
         _context = context;
         _log = context.logManager().getLog(KBucketImpl.class);
-        _entries = new HashSet();
+        _entries = new ArrayList(64); //new HashSet();
         setLocal(local);
     }
@@ -193,14 +195,16 @@ class KBucketImpl implements KBucket {
     public Set getEntries() {
         Set entries = new HashSet(64);
         synchronized (_entries) {
-            entries.addAll(_entries);
+            for (int i = 0; i < _entries.size(); i++)
+                entries.add((Hash)_entries.get(i));
         }
         return entries;
     }
     public Set getEntries(Set toIgnoreHashes) {
         Set entries = new HashSet(64);
         synchronized (_entries) {
-            entries.addAll(_entries);
+            for (int i = 0; i < _entries.size(); i++)
+                entries.add((Hash)_entries.get(i));
             entries.removeAll(toIgnoreHashes);
         }
         return entries;
@@ -208,22 +212,26 @@ class KBucketImpl implements KBucket {
     public void getEntries(SelectionCollector collector) {
         synchronized (_entries) {
-            for (Iterator iter = _entries.iterator(); iter.hasNext(); ) {
-                collector.add((Hash)iter.next());
-            }
+            for (int i = 0; i < _entries.size(); i++)
+                collector.add((Hash)_entries.get(i));
         }
     }
     public void setEntries(Set entries) {
         synchronized (_entries) {
             _entries.clear();
-            _entries.addAll(entries);
+            for (Iterator iter = entries.iterator(); iter.hasNext(); ) {
+                Hash entry = (Hash)iter.next();
+                if (!_entries.contains(entry))
+                    _entries.add(entry);
+            }
         }
     }
     public int add(Hash peer) {
         synchronized (_entries) {
-            _entries.add(peer);
+            if (!_entries.contains(peer))
+                _entries.add(peer);
             return _entries.size();
         }
     }
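
The hunks above swap the kbucket's backing HashSet for an ArrayList,
deduplicating manually with contains() on insert. Reduced to a standalone
sketch (illustrative only, not router code, kept in the same raw-type style):
for the few dozen entries a bucket holds, the linear contains() scan is cheap,
and the list gives cheaper iteration and snapshotting than a hash set.

    import java.util.ArrayList;
    import java.util.List;

    /** list used as a small "set", in the style of the KBucketImpl change above */
    public class SmallHashList {
        private final List _entries = new ArrayList(64);

        /** add the entry only if absent, returning the resulting size */
        public synchronized int add(Object entry) {
            if (!_entries.contains(entry)) // O(n) scan - fine while buckets stay small
                _entries.add(entry);
            return _entries.size();
        }

        /** copy out a snapshot by index, mirroring getEntries() above */
        public synchronized List snapshot() {
            List copy = new ArrayList(_entries.size());
            for (int i = 0; i < _entries.size(); i++)
                copy.add(_entries.get(i));
            return copy;
        }
    }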

View File

@@ -807,7 +807,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     }
     /** smallest allowed period */
-    private static final int MIN_PER_PEER_TIMEOUT = 1*1000;
+    private static final int MIN_PER_PEER_TIMEOUT = 2*1000;
     private static final int MAX_PER_PEER_TIMEOUT = 5*1000;
     public int getPeerTimeout(Hash peer) {

View File

@@ -84,7 +84,7 @@ public class RepublishLeaseSetJob extends JobImpl {
         public void runJob() {
             if (_log.shouldLog(Log.WARN))
                 _log.warn("FAILED publishing of the leaseSet for " + _dest.toBase64());
-            RepublishLeaseSetJob.this.requeue(30*1000);
+            RepublishLeaseSetJob.this.requeue(getContext().random().nextInt(60*1000));
         }
     }
 }
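
The fixed 30-second requeue above becomes a uniformly random delay in
[0, 60s), so a burst of failed republishes spreads out instead of retrying in
lockstep; the mean delay stays 30s. A toy illustration of the same jitter
idea (assumed names, not router code):

    import java.util.Random;

    /** retry scheduling with uniform jitter, as in the requeue change above */
    public class JitteredRetry {
        private static final int MAX_DELAY_MS = 60 * 1000;
        private final Random _random = new Random();

        /** a delay in [0, 60s) - same average as a fixed 30s, but desynchronized */
        public long nextDelayMs() {
            return _random.nextInt(MAX_DELAY_MS);
        }
    }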

View File

@@ -39,8 +39,8 @@ class StoreJob extends JobImpl {
     private long _expiration;
     private PeerSelector _peerSelector;
-    private final static int PARALLELIZATION = 3; // how many sent at a time
-    private final static int REDUNDANCY = 6; // we want the data sent to 6 peers
+    private final static int PARALLELIZATION = 4; // how many sent at a time
+    private final static int REDUNDANCY = 4; // we want the data sent to 4 peers
     /**
      * additionally send to 1 outlier(s), in case all of the routers chosen in our
      * REDUNDANCY set are attacking us by accepting DbStore messages but dropping
@@ -75,6 +75,7 @@ class StoreJob extends JobImpl {
         getContext().statManager().createRateStat("netDb.storePeers", "How many peers each netDb must be sent to before success?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
         getContext().statManager().createRateStat("netDb.storeFailedPeers", "How many peers each netDb must be sent to before failing completely?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
         getContext().statManager().createRateStat("netDb.ackTime", "How long does it take for a peer to ack a netDb store?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.replyTimeout", "How long after a netDb send does the timeout expire (when the peer doesn't reply in time)?", "NetworkDatabase", new long[] { 60*1000, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
         _facade = facade;
         _state = new StoreState(getContext(), key, data, toSkip);
         _onSuccess = onSuccess;
@@ -154,8 +155,15 @@ class StoreJob extends JobImpl {
                 _state.addSkipped(peer);
             } else {
                 int peerTimeout = _facade.getPeerTimeout(peer);
-                //RateStat failing = prof.getDBHistory().getFailedLookupRate();
-                //Rate failed = failing.getRate(60*60*1000);
+                PeerProfile prof = getContext().profileOrganizer().getProfile(peer);
+                RateStat failing = prof.getDBHistory().getFailedLookupRate();
+                Rate failed = failing.getRate(60*60*1000);
+                long failedCount = failed.getCurrentEventCount()+failed.getLastEventCount();
+                if (failedCount > 10) {
+                    _state.addSkipped(peer);
+                    continue;
+                }
+                //
                 //if (failed.getCurrentEventCount() + failed.getLastEventCount() > avg) {
                 //    _state.addSkipped(peer);
                 //}
@@ -250,7 +258,7 @@ class StoreJob extends JobImpl {
         _state.addPending(peer.getIdentity().getHash());
         SendSuccessJob onReply = new SendSuccessJob(getContext(), peer);
-        FailedJob onFail = new FailedJob(getContext(), peer);
+        FailedJob onFail = new FailedJob(getContext(), peer, getContext().clock().now());
         StoreMessageSelector selector = new StoreMessageSelector(getContext(), getJobId(), peer, token, expiration);
         TunnelInfo outTunnel = selectOutboundTunnel();
@@ -321,10 +329,12 @@ class StoreJob extends JobImpl {
      */
     private class FailedJob extends JobImpl {
         private RouterInfo _peer;
+        private long _sendOn;
-        public FailedJob(RouterContext enclosingContext, RouterInfo peer) {
+        public FailedJob(RouterContext enclosingContext, RouterInfo peer, long sendOn) {
             super(enclosingContext);
             _peer = peer;
+            _sendOn = sendOn;
         }
         public void runJob() {
             if (_log.shouldLog(Log.WARN))
@@ -332,6 +342,7 @@ class StoreJob extends JobImpl {
                        + " timed out sending " + _state.getTarget());
             _state.replyTimeout(_peer.getIdentity().getHash());
             getContext().profileManager().dbStoreFailed(_peer.getIdentity().getHash());
+            getContext().statManager().addRateData("netDb.replyTimeout", getContext().clock().now() - _sendOn, 0);
             sendNext();
         }
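
The new block in the -154,8 hunk above turns the previously commented-out idea
into a concrete filter: a peer whose failed netDb lookups over the current
plus previous one-hour rate period exceed 10 is skipped as a store target.
Reduced to its essentials (standalone sketch, not the actual profile classes):

    /** store-target filter in the spirit of the StoreJob change above */
    public class FailingPeerFilter {
        private static final long MAX_FAILED_LOOKUPS = 10;

        /**
         * @param currentPeriodFails failed lookups in the current (partial) hour
         * @param lastPeriodFails    failed lookups in the previous full hour
         */
        public boolean shouldSkip(long currentPeriodFails, long lastPeriodFails) {
            return currentPeriodFails + lastPeriodFails > MAX_FAILED_LOOKUPS;
        }
    }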

View File

@@ -113,8 +113,8 @@ public class MessageReceiver implements Runnable {
             m.setUniqueId(state.getMessageId());
             return m;
         } catch (I2NPMessageException ime) {
-            if (_log.shouldLog(Log.ERROR))
-                _log.error("Message invalid: " + state, ime);
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Message invalid: " + state, ime);
             return null;
         } catch (Exception e) {
             _log.log(Log.CRIT, "Error dealing with a message: " + state, e);

View File

@@ -148,8 +148,8 @@ public class FragmentHandler {
             //Hash v = _context.sha().calculateHash(preV, 0, validLength);
             boolean eq = DataHelper.eq(v.getData(), 0, preprocessed, offset + HopProcessor.IV_LENGTH, 4);
             if (!eq) {
-                if (_log.shouldLog(Log.ERROR))
-                    _log.error("Corrupt tunnel message - verification fails: \n" + Base64.encode(preprocessed, offset+HopProcessor.IV_LENGTH, 4)
+                if (_log.shouldLog(Log.WARN))
+                    _log.warn("Corrupt tunnel message - verification fails: \n" + Base64.encode(preprocessed, offset+HopProcessor.IV_LENGTH, 4)
                               + "\n" + Base64.encode(v.getData(), 0, 4));
                 if (_log.shouldLog(Log.WARN))
                     _log.warn("nomatching endpoint: # pad bytes: " + (paddingEnd-(HopProcessor.IV_LENGTH+4)-1) + "\n"